Commit 89332590 authored by Antoine Tenart's avatar Antoine Tenart Committed by Herbert Xu

crypto: inside-secure - do not rely on the hardware last bit for result descriptors

When performing a transformation the hardware is given result
descriptors to save the result data. Those result descriptors are
batched using a 'first' and a 'last' bit. There are cases where more
descriptors than needed are given to the engine, leading to the engine
only using some of them, and not setting the last bit on the last
descriptor we gave. This causes issues where the driver and the hardware
aren't in sync anymore about the number of result descriptors given (as
the driver does not give a pool of descriptors to use for any
transformation, but a pool of descriptors to use *per* transformation).

This patch fixes it by attaching the number of given result descriptors
to the requests, and by using this number instead of the 'last' bit
found on the descriptors to process them.
Signed-off-by: default avatarAntoine Tenart <antoine.tenart@bootlin.com>
Signed-off-by: default avatarHerbert Xu <herbert@gondor.apana.org.au>
parent 583d7e19
...@@ -51,6 +51,8 @@ struct safexcel_cipher_ctx { ...@@ -51,6 +51,8 @@ struct safexcel_cipher_ctx {
struct safexcel_cipher_req { struct safexcel_cipher_req {
enum safexcel_cipher_direction direction; enum safexcel_cipher_direction direction;
/* Number of result descriptors associated to the request */
unsigned int rdescs;
bool needs_inv; bool needs_inv;
}; };
...@@ -351,7 +353,10 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin ...@@ -351,7 +353,10 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin
*ret = 0; *ret = 0;
do { if (unlikely(!sreq->rdescs))
return 0;
while (sreq->rdescs--) {
rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr); rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
if (IS_ERR(rdesc)) { if (IS_ERR(rdesc)) {
dev_err(priv->dev, dev_err(priv->dev,
...@@ -364,7 +369,7 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin ...@@ -364,7 +369,7 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin
*ret = safexcel_rdesc_check_errors(priv, rdesc); *ret = safexcel_rdesc_check_errors(priv, rdesc);
ndesc++; ndesc++;
} while (!rdesc->last_seg); }
safexcel_complete(priv, ring); safexcel_complete(priv, ring);
...@@ -502,6 +507,7 @@ static int safexcel_send_req(struct crypto_async_request *base, int ring, ...@@ -502,6 +507,7 @@ static int safexcel_send_req(struct crypto_async_request *base, int ring,
static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv, static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
int ring, int ring,
struct crypto_async_request *base, struct crypto_async_request *base,
struct safexcel_cipher_req *sreq,
bool *should_complete, int *ret) bool *should_complete, int *ret)
{ {
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(base->tfm); struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(base->tfm);
...@@ -510,7 +516,10 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv, ...@@ -510,7 +516,10 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
*ret = 0; *ret = 0;
do { if (unlikely(!sreq->rdescs))
return 0;
while (sreq->rdescs--) {
rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr); rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
if (IS_ERR(rdesc)) { if (IS_ERR(rdesc)) {
dev_err(priv->dev, dev_err(priv->dev,
...@@ -523,7 +532,7 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv, ...@@ -523,7 +532,7 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
*ret = safexcel_rdesc_check_errors(priv, rdesc); *ret = safexcel_rdesc_check_errors(priv, rdesc);
ndesc++; ndesc++;
} while (!rdesc->last_seg); }
safexcel_complete(priv, ring); safexcel_complete(priv, ring);
...@@ -566,7 +575,7 @@ static int safexcel_skcipher_handle_result(struct safexcel_crypto_priv *priv, ...@@ -566,7 +575,7 @@ static int safexcel_skcipher_handle_result(struct safexcel_crypto_priv *priv,
if (sreq->needs_inv) { if (sreq->needs_inv) {
sreq->needs_inv = false; sreq->needs_inv = false;
err = safexcel_handle_inv_result(priv, ring, async, err = safexcel_handle_inv_result(priv, ring, async, sreq,
should_complete, ret); should_complete, ret);
} else { } else {
err = safexcel_handle_req_result(priv, ring, async, req->src, err = safexcel_handle_req_result(priv, ring, async, req->src,
...@@ -607,7 +616,7 @@ static int safexcel_aead_handle_result(struct safexcel_crypto_priv *priv, ...@@ -607,7 +616,7 @@ static int safexcel_aead_handle_result(struct safexcel_crypto_priv *priv,
if (sreq->needs_inv) { if (sreq->needs_inv) {
sreq->needs_inv = false; sreq->needs_inv = false;
err = safexcel_handle_inv_result(priv, ring, async, err = safexcel_handle_inv_result(priv, ring, async, sreq,
should_complete, ret); should_complete, ret);
} else { } else {
err = safexcel_handle_req_result(priv, ring, async, req->src, err = safexcel_handle_req_result(priv, ring, async, req->src,
...@@ -653,6 +662,8 @@ static int safexcel_skcipher_send(struct crypto_async_request *async, int ring, ...@@ -653,6 +662,8 @@ static int safexcel_skcipher_send(struct crypto_async_request *async, int ring,
ret = safexcel_send_req(async, ring, sreq, req->src, ret = safexcel_send_req(async, ring, sreq, req->src,
req->dst, req->cryptlen, 0, 0, req->iv, req->dst, req->cryptlen, 0, 0, req->iv,
commands, results); commands, results);
sreq->rdescs = *results;
return ret; return ret;
} }
...@@ -675,6 +686,7 @@ static int safexcel_aead_send(struct crypto_async_request *async, int ring, ...@@ -675,6 +686,7 @@ static int safexcel_aead_send(struct crypto_async_request *async, int ring,
req->cryptlen, req->assoclen, req->cryptlen, req->assoclen,
crypto_aead_authsize(tfm), req->iv, crypto_aead_authsize(tfm), req->iv,
commands, results); commands, results);
sreq->rdescs = *results;
return ret; return ret;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment