Commit 8472e778 authored by Antoine Ténart, committed by Herbert Xu

crypto: inside-secure - move request dequeueing into a workqueue

This patch moves the request dequeueing into a workqueue to improve the
coalescing of interrupts when sending requests to the engine, as the
engine can raise a single interrupt for n requests sent. Using a
workqueue allows more requests to be sent at once.
Suggested-by: Ofer Heifetz <oferh@marvell.com>
Signed-off-by: Antoine Tenart <antoine.tenart@free-electrons.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent 5290ad6e
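For context, here is a minimal sketch of the pattern this change adopts, using hypothetical names (my_ring, my_dequeue, my_irq_handler) rather than the driver's actual structures: the interrupt handler only schedules a work item, and the work function drains the software queue, so requests that accumulate between interrupts can be submitted to the engine in one batch.

#include <linux/interrupt.h>
#include <linux/workqueue.h>

/* Hypothetical ring context, for illustration only. */
struct my_ring {
        struct workqueue_struct *wq;
        struct work_struct dequeue_work;
};

/* Stub standing in for the driver's dequeue routine: it would drain the
 * software queue and push a batch of pending requests to the hardware. */
static void my_dequeue(struct my_ring *ring)
{
}

static void my_dequeue_work(struct work_struct *work)
{
        struct my_ring *ring = container_of(work, struct my_ring, dequeue_work);

        my_dequeue(ring);
}

static irqreturn_t my_irq_handler(int irq, void *data)
{
        struct my_ring *ring = data;

        /* Defer dequeueing to the workqueue so several requests can be
         * coalesced instead of being submitted one by one from IRQ context. */
        queue_work(ring->wq, &ring->dequeue_work);

        return IRQ_HANDLED;
}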
@@ -429,8 +429,6 @@ void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring)
         struct safexcel_request *request;
         int ret, nreq = 0, cdesc = 0, rdesc = 0, commands, results;
 
-        priv->ring[ring].need_dequeue = false;
-
         do {
                 spin_lock_bh(&priv->ring[ring].queue_lock);
                 backlog = crypto_get_backlog(&priv->ring[ring].queue);
@@ -445,8 +443,6 @@ void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring)
                         spin_lock_bh(&priv->ring[ring].queue_lock);
                         crypto_enqueue_request(&priv->ring[ring].queue, req);
                         spin_unlock_bh(&priv->ring[ring].queue_lock);
 
-                        priv->ring[ring].need_dequeue = true;
-
                         goto finalize;
                 }
@@ -455,7 +451,6 @@ void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring)
                 if (ret) {
                         kfree(request);
                         req->complete(req, ret);
-                        priv->ring[ring].need_dequeue = true;
                         goto finalize;
                 }
@@ -471,9 +466,7 @@ void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring)
         } while (nreq++ < EIP197_MAX_BATCH_SZ);
 
 finalize:
-        if (nreq == EIP197_MAX_BATCH_SZ)
-                priv->ring[ring].need_dequeue = true;
-        else if (!nreq)
+        if (!nreq)
                 return;
 
         spin_lock_bh(&priv->ring[ring].lock);
@@ -628,13 +621,18 @@ static inline void safexcel_handle_result_descriptor(struct safexcel_crypto_priv
 static void safexcel_handle_result_work(struct work_struct *work)
 {
         struct safexcel_work_data *data =
-                        container_of(work, struct safexcel_work_data, work);
+                        container_of(work, struct safexcel_work_data, result_work);
         struct safexcel_crypto_priv *priv = data->priv;
 
         safexcel_handle_result_descriptor(priv, data->ring);
+}
+
+static void safexcel_dequeue_work(struct work_struct *work)
+{
+        struct safexcel_work_data *data =
+                        container_of(work, struct safexcel_work_data, work);
 
-        if (priv->ring[data->ring].need_dequeue)
-                safexcel_dequeue(data->priv, data->ring);
+        safexcel_dequeue(data->priv, data->ring);
 }
 
 struct safexcel_ring_irq_data {
@@ -665,7 +663,10 @@ static irqreturn_t safexcel_irq_ring(int irq, void *data)
                  */
                 dev_err(priv->dev, "RDR: fatal error.");
         } else if (likely(stat & EIP197_xDR_THRESH)) {
-                queue_work(priv->ring[ring].workqueue, &priv->ring[ring].work_data.work);
+                queue_work(priv->ring[ring].workqueue,
+                           &priv->ring[ring].work_data.result_work);
+                queue_work(priv->ring[ring].workqueue,
+                           &priv->ring[ring].work_data.work);
         }
 
         /* ACK the interrupts */
@@ -846,7 +847,9 @@ static int safexcel_probe(struct platform_device *pdev)
                 priv->ring[i].work_data.priv = priv;
                 priv->ring[i].work_data.ring = i;
-                INIT_WORK(&priv->ring[i].work_data.work, safexcel_handle_result_work);
+                INIT_WORK(&priv->ring[i].work_data.result_work,
+                          safexcel_handle_result_work);
+                INIT_WORK(&priv->ring[i].work_data.work, safexcel_dequeue_work);
 
                 snprintf(wq_name, 9, "wq_ring%d", i);
                 priv->ring[i].workqueue = create_singlethread_workqueue(wq_name);
...
@@ -459,6 +459,7 @@ struct safexcel_config {
 struct safexcel_work_data {
         struct work_struct work;
+        struct work_struct result_work;
         struct safexcel_crypto_priv *priv;
         int ring;
 };
@@ -489,7 +490,6 @@ struct safexcel_crypto_priv {
                 /* queue */
                 struct crypto_queue queue;
                 spinlock_t queue_lock;
-                bool need_dequeue;
         } ring[EIP197_MAX_RINGS];
 };
...
@@ -358,8 +358,8 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
         if (enq_ret != -EINPROGRESS)
                 *ret = enq_ret;
 
-        if (!priv->ring[ring].need_dequeue)
-                safexcel_dequeue(priv, ring);
+        queue_work(priv->ring[ring].workqueue,
+                   &priv->ring[ring].work_data.work);
 
         *should_complete = false;
@@ -448,8 +448,8 @@ static int safexcel_cipher_exit_inv(struct crypto_tfm *tfm)
         crypto_enqueue_request(&priv->ring[ring].queue, &req->base);
         spin_unlock_bh(&priv->ring[ring].queue_lock);
 
-        if (!priv->ring[ring].need_dequeue)
-                safexcel_dequeue(priv, ring);
+        queue_work(priv->ring[ring].workqueue,
+                   &priv->ring[ring].work_data.work);
 
         wait_for_completion_interruptible(&result.completion);
@@ -495,8 +495,8 @@ static int safexcel_aes(struct skcipher_request *req,
         ret = crypto_enqueue_request(&priv->ring[ring].queue, &req->base);
         spin_unlock_bh(&priv->ring[ring].queue_lock);
 
-        if (!priv->ring[ring].need_dequeue)
-                safexcel_dequeue(priv, ring);
+        queue_work(priv->ring[ring].workqueue,
+                   &priv->ring[ring].work_data.work);
 
         return ret;
 }
...
@@ -381,8 +381,8 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
         if (enq_ret != -EINPROGRESS)
                 *ret = enq_ret;
 
-        if (!priv->ring[ring].need_dequeue)
-                safexcel_dequeue(priv, ring);
+        queue_work(priv->ring[ring].workqueue,
+                   &priv->ring[ring].work_data.work);
 
         *should_complete = false;
@@ -470,8 +470,8 @@ static int safexcel_ahash_exit_inv(struct crypto_tfm *tfm)
         crypto_enqueue_request(&priv->ring[ring].queue, &req->base);
         spin_unlock_bh(&priv->ring[ring].queue_lock);
 
-        if (!priv->ring[ring].need_dequeue)
-                safexcel_dequeue(priv, ring);
+        queue_work(priv->ring[ring].workqueue,
+                   &priv->ring[ring].work_data.work);
 
         wait_for_completion_interruptible(&result.completion);
@@ -556,8 +556,8 @@ static int safexcel_ahash_enqueue(struct ahash_request *areq)
         ret = crypto_enqueue_request(&priv->ring[ring].queue, &areq->base);
         spin_unlock_bh(&priv->ring[ring].queue_lock);
 
-        if (!priv->ring[ring].need_dequeue)
-                safexcel_dequeue(priv, ring);
+        queue_work(priv->ring[ring].workqueue,
+                   &priv->ring[ring].work_data.work);
 
         return ret;
 }
...