Commit 1984aaee authored by Horia Geantă, committed by Herbert Xu

crypto: caam - fix return code in completion callbacks

Modify drivers to provide a valid errno (and not the HW error ID)
to the user, via completion callbacks.

A "valid errno" is currently not explicitly mentioned in the docs,
however the error code is expected to match the one returned by the
generic SW implementation.

Note: in most error cases caam/qi and caam/qi2 returned -EIO; align all
caam drivers to return -EINVAL.

While here, ratelimit prints triggered by fuzz testing, such that
the console is not flooded.
Signed-off-by: Horia Geantă <horia.geanta@nxp.com>
Signed-off-by: Iuliana Prodan <iuliana.prodan@nxp.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent 51fab3d7
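Before the diff, the convention the patch enforces can be condensed into a single stand-alone sketch. It is illustrative only: decode_status(), request_complete() and the two mask values are simplified stand-ins for caam_jr_strstatus()/caam_qi2_strstatus(), the JRSTA_* definitions in regs.h and the crypto completion calls touched by the hunks below. The idea is that a hardware status word is decoded once into a valid errno (-EBADMSG for an ICV/integrity-check failure, -EINVAL for any other hardware error, 0 on success) and only that errno reaches the completion callback.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the JRSTA_* CCB error-ID fields in regs.h. */
#define CCBERR_ERRID_MASK	0x000000ffu
#define CCBERR_ERRID_ICVCHK	0x0000000au

/* Decode a CAAM status word into a valid errno (0 on success). */
static int decode_status(uint32_t status)
{
	if (!status)
		return 0;
	/* ICV (authentication tag) mismatch: same code the SW algos return. */
	if ((status & CCBERR_ERRID_MASK) == CCBERR_ERRID_ICVCHK)
		return -EBADMSG;
	/* Every other hardware error is reported uniformly as -EINVAL. */
	return -EINVAL;
}

/* Model of a completion callback: forward the errno, never the raw HW word. */
static void request_complete(uint32_t hw_status)
{
	printf("completing request with %d\n", decode_status(hw_status));
}

int main(void)
{
	request_complete(0x00000000);	/* success         -> 0        */
	request_complete(0x4000000a);	/* ICV check fail  -> -EBADMSG */
	request_complete(0x40000001);	/* other HW error  -> -EINVAL  */
	return 0;
}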
@@ -930,19 +930,20 @@ static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
 {
 	struct aead_request *req = context;
 	struct aead_edesc *edesc;
+	int ecode = 0;
 	dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
 	edesc = container_of(desc, struct aead_edesc, hw_desc[0]);
 	if (err)
-		caam_jr_strstatus(jrdev, err);
+		ecode = caam_jr_strstatus(jrdev, err);
 	aead_unmap(jrdev, edesc, req);
 	kfree(edesc);
-	aead_request_complete(req, err);
+	aead_request_complete(req, ecode);
 }
 static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
@@ -950,25 +951,20 @@ static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
 {
 	struct aead_request *req = context;
 	struct aead_edesc *edesc;
+	int ecode = 0;
 	dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
 	edesc = container_of(desc, struct aead_edesc, hw_desc[0]);
 	if (err)
-		caam_jr_strstatus(jrdev, err);
+		ecode = caam_jr_strstatus(jrdev, err);
 	aead_unmap(jrdev, edesc, req);
-	/*
-	 * verify hw auth check passed else return -EBADMSG
-	 */
-	if ((err & JRSTA_CCBERR_ERRID_MASK) == JRSTA_CCBERR_ERRID_ICVCHK)
-		err = -EBADMSG;
 	kfree(edesc);
-	aead_request_complete(req, err);
+	aead_request_complete(req, ecode);
 }
 static void skcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
@@ -978,13 +974,14 @@ static void skcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
 	struct skcipher_edesc *edesc;
 	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
 	int ivsize = crypto_skcipher_ivsize(skcipher);
+	int ecode = 0;
 	dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
 	edesc = container_of(desc, struct skcipher_edesc, hw_desc[0]);
 	if (err)
-		caam_jr_strstatus(jrdev, err);
+		ecode = caam_jr_strstatus(jrdev, err);
 	skcipher_unmap(jrdev, edesc, req);
@@ -1008,7 +1005,7 @@ static void skcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
 	kfree(edesc);
-	skcipher_request_complete(req, err);
+	skcipher_request_complete(req, ecode);
 }
 static void skcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
@@ -1018,12 +1015,13 @@ static void skcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
 	struct skcipher_edesc *edesc;
 	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
 	int ivsize = crypto_skcipher_ivsize(skcipher);
+	int ecode = 0;
 	dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
 	edesc = container_of(desc, struct skcipher_edesc, hw_desc[0]);
 	if (err)
-		caam_jr_strstatus(jrdev, err);
+		ecode = caam_jr_strstatus(jrdev, err);
 	skcipher_unmap(jrdev, edesc, req);
@@ -1047,7 +1045,7 @@ static void skcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
 	kfree(edesc);
-	skcipher_request_complete(req, err);
+	skcipher_request_complete(req, ecode);
 }
 /*
......
@@ -884,20 +884,8 @@ static void aead_done(struct caam_drv_req *drv_req, u32 status)
 	qidev = caam_ctx->qidev;
-	if (unlikely(status)) {
-		u32 ssrc = status & JRSTA_SSRC_MASK;
-		u8 err_id = status & JRSTA_CCBERR_ERRID_MASK;
-		caam_jr_strstatus(qidev, status);
-		/*
-		 * verify hw auth check passed else return -EBADMSG
-		 */
-		if (ssrc == JRSTA_SSRC_CCB_ERROR &&
-		    err_id == JRSTA_CCBERR_ERRID_ICVCHK)
-			ecode = -EBADMSG;
-		else
-			ecode = -EIO;
-	}
+	if (unlikely(status))
+		ecode = caam_jr_strstatus(qidev, status);
 	edesc = container_of(drv_req, typeof(*edesc), drv_req);
 	aead_unmap(qidev, edesc, aead_req);
@@ -1190,13 +1178,14 @@ static void skcipher_done(struct caam_drv_req *drv_req, u32 status)
 	struct caam_ctx *caam_ctx = crypto_skcipher_ctx(skcipher);
 	struct device *qidev = caam_ctx->qidev;
 	int ivsize = crypto_skcipher_ivsize(skcipher);
+	int ecode = 0;
 	dev_dbg(qidev, "%s %d: status 0x%x\n", __func__, __LINE__, status);
 	edesc = container_of(drv_req, typeof(*edesc), drv_req);
 	if (status)
-		caam_jr_strstatus(qidev, status);
+		ecode = caam_jr_strstatus(qidev, status);
 	print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ",
 			     DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
@@ -1215,7 +1204,7 @@ static void skcipher_done(struct caam_drv_req *drv_req, u32 status)
 	memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes, ivsize);
 	qi_cache_free(edesc);
-	skcipher_request_complete(req, status);
+	skcipher_request_complete(req, ecode);
 }
 static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
......
@@ -1228,10 +1228,8 @@ static void aead_encrypt_done(void *cbk_ctx, u32 status)
 	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
-	if (unlikely(status)) {
-		caam_qi2_strstatus(ctx->dev, status);
-		ecode = -EIO;
-	}
+	if (unlikely(status))
+		ecode = caam_qi2_strstatus(ctx->dev, status);
 	aead_unmap(ctx->dev, edesc, req);
 	qi_cache_free(edesc);
@@ -1251,17 +1249,8 @@ static void aead_decrypt_done(void *cbk_ctx, u32 status)
 	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
-	if (unlikely(status)) {
-		caam_qi2_strstatus(ctx->dev, status);
-		/*
-		 * verify hw auth check passed else return -EBADMSG
-		 */
-		if ((status & JRSTA_CCBERR_ERRID_MASK) ==
-		    JRSTA_CCBERR_ERRID_ICVCHK)
-			ecode = -EBADMSG;
-		else
-			ecode = -EIO;
-	}
+	if (unlikely(status))
+		ecode = caam_qi2_strstatus(ctx->dev, status);
 	aead_unmap(ctx->dev, edesc, req);
 	qi_cache_free(edesc);
@@ -1353,10 +1342,8 @@ static void skcipher_encrypt_done(void *cbk_ctx, u32 status)
 	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
-	if (unlikely(status)) {
-		caam_qi2_strstatus(ctx->dev, status);
-		ecode = -EIO;
-	}
+	if (unlikely(status))
+		ecode = caam_qi2_strstatus(ctx->dev, status);
 	print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ",
 			     DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
@@ -1391,10 +1378,8 @@ static void skcipher_decrypt_done(void *cbk_ctx, u32 status)
 	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
-	if (unlikely(status)) {
-		caam_qi2_strstatus(ctx->dev, status);
-		ecode = -EIO;
-	}
+	if (unlikely(status))
+		ecode = caam_qi2_strstatus(ctx->dev, status);
 	print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ",
 			     DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
@@ -3095,10 +3080,7 @@ static void split_key_sh_done(void *cbk_ctx, u32 err)
 	dev_dbg(res->dev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
-	if (err)
-		caam_qi2_strstatus(res->dev, err);
-	res->err = err;
+	res->err = err ? caam_qi2_strstatus(res->dev, err) : 0;
 	complete(&res->completion);
 }
@@ -3283,10 +3265,8 @@ static void ahash_done(void *cbk_ctx, u32 status)
 	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
-	if (unlikely(status)) {
-		caam_qi2_strstatus(ctx->dev, status);
-		ecode = -EIO;
-	}
+	if (unlikely(status))
+		ecode = caam_qi2_strstatus(ctx->dev, status);
 	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
 	memcpy(req->result, state->caam_ctx, digestsize);
@@ -3311,10 +3291,8 @@ static void ahash_done_bi(void *cbk_ctx, u32 status)
 	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
-	if (unlikely(status)) {
-		caam_qi2_strstatus(ctx->dev, status);
-		ecode = -EIO;
-	}
+	if (unlikely(status))
+		ecode = caam_qi2_strstatus(ctx->dev, status);
 	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
 	switch_buf(state);
@@ -3344,10 +3322,8 @@ static void ahash_done_ctx_src(void *cbk_ctx, u32 status)
 	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
-	if (unlikely(status)) {
-		caam_qi2_strstatus(ctx->dev, status);
-		ecode = -EIO;
-	}
+	if (unlikely(status))
+		ecode = caam_qi2_strstatus(ctx->dev, status);
 	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
 	memcpy(req->result, state->caam_ctx, digestsize);
@@ -3372,10 +3348,8 @@ static void ahash_done_ctx_dst(void *cbk_ctx, u32 status)
 	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
-	if (unlikely(status)) {
-		caam_qi2_strstatus(ctx->dev, status);
-		ecode = -EIO;
-	}
+	if (unlikely(status))
+		ecode = caam_qi2_strstatus(ctx->dev, status);
 	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
 	switch_buf(state);
@@ -4701,7 +4675,7 @@ static void dpaa2_caam_process_fd(struct dpaa2_caam_priv *priv,
 	fd_err = dpaa2_fd_get_ctrl(fd) & FD_CTRL_ERR_MASK;
 	if (unlikely(fd_err))
-		dev_err(priv->dev, "FD error: %08x\n", fd_err);
+		dev_err_ratelimited(priv->dev, "FD error: %08x\n", fd_err);
 	/*
 	 * FD[ADDR] is guaranteed to be valid, irrespective of errors reported
......
@@ -584,12 +584,13 @@ static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
 	int digestsize = crypto_ahash_digestsize(ahash);
 	struct caam_hash_state *state = ahash_request_ctx(req);
 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+	int ecode = 0;
 	dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
 	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
 	if (err)
-		caam_jr_strstatus(jrdev, err);
+		ecode = caam_jr_strstatus(jrdev, err);
 	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
 	memcpy(req->result, state->caam_ctx, digestsize);
@@ -599,7 +600,7 @@ static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
 			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
 			     ctx->ctx_len, 1);
-	req->base.complete(&req->base, err);
+	req->base.complete(&req->base, ecode);
 }
 static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
@@ -611,12 +612,13 @@ static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
 	struct caam_hash_state *state = ahash_request_ctx(req);
 	int digestsize = crypto_ahash_digestsize(ahash);
+	int ecode = 0;
 	dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
 	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
 	if (err)
-		caam_jr_strstatus(jrdev, err);
+		ecode = caam_jr_strstatus(jrdev, err);
 	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
 	switch_buf(state);
@@ -630,7 +632,7 @@ static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
 			     DUMP_PREFIX_ADDRESS, 16, 4, req->result,
 			     digestsize, 1);
-	req->base.complete(&req->base, err);
+	req->base.complete(&req->base, ecode);
 }
 static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
@@ -642,12 +644,13 @@ static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
 	int digestsize = crypto_ahash_digestsize(ahash);
 	struct caam_hash_state *state = ahash_request_ctx(req);
 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+	int ecode = 0;
 	dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
 	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
 	if (err)
-		caam_jr_strstatus(jrdev, err);
+		ecode = caam_jr_strstatus(jrdev, err);
 	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL);
 	memcpy(req->result, state->caam_ctx, digestsize);
@@ -657,7 +660,7 @@ static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
 			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
 			     ctx->ctx_len, 1);
-	req->base.complete(&req->base, err);
+	req->base.complete(&req->base, ecode);
 }
 static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
@@ -669,12 +672,13 @@ static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
 	struct caam_hash_state *state = ahash_request_ctx(req);
 	int digestsize = crypto_ahash_digestsize(ahash);
+	int ecode = 0;
 	dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
 	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
 	if (err)
-		caam_jr_strstatus(jrdev, err);
+		ecode = caam_jr_strstatus(jrdev, err);
 	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE);
 	switch_buf(state);
@@ -688,7 +692,7 @@ static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
 			     DUMP_PREFIX_ADDRESS, 16, 4, req->result,
 			     digestsize, 1);
-	req->base.complete(&req->base, err);
+	req->base.complete(&req->base, ecode);
 }
 /*
......
@@ -107,9 +107,10 @@ static void rsa_pub_done(struct device *dev, u32 *desc, u32 err, void *context)
 {
 	struct akcipher_request *req = context;
 	struct rsa_edesc *edesc;
+	int ecode = 0;
 	if (err)
-		caam_jr_strstatus(dev, err);
+		ecode = caam_jr_strstatus(dev, err);
 	edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);
@@ -117,7 +118,7 @@ static void rsa_pub_done(struct device *dev, u32 *desc, u32 err, void *context)
 	rsa_io_unmap(dev, edesc, req);
 	kfree(edesc);
-	akcipher_request_complete(req, err);
+	akcipher_request_complete(req, ecode);
 }
 static void rsa_priv_f1_done(struct device *dev, u32 *desc, u32 err,
@@ -125,9 +126,10 @@ static void rsa_priv_f1_done(struct device *dev, u32 *desc, u32 err,
 {
 	struct akcipher_request *req = context;
 	struct rsa_edesc *edesc;
+	int ecode = 0;
 	if (err)
-		caam_jr_strstatus(dev, err);
+		ecode = caam_jr_strstatus(dev, err);
 	edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);
@@ -135,7 +137,7 @@ static void rsa_priv_f1_done(struct device *dev, u32 *desc, u32 err,
 	rsa_io_unmap(dev, edesc, req);
 	kfree(edesc);
-	akcipher_request_complete(req, err);
+	akcipher_request_complete(req, ecode);
 }
 static void rsa_priv_f2_done(struct device *dev, u32 *desc, u32 err,
@@ -143,9 +145,10 @@ static void rsa_priv_f2_done(struct device *dev, u32 *desc, u32 err,
 {
 	struct akcipher_request *req = context;
 	struct rsa_edesc *edesc;
+	int ecode = 0;
 	if (err)
-		caam_jr_strstatus(dev, err);
+		ecode = caam_jr_strstatus(dev, err);
 	edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);
@@ -153,7 +156,7 @@ static void rsa_priv_f2_done(struct device *dev, u32 *desc, u32 err,
 	rsa_io_unmap(dev, edesc, req);
 	kfree(edesc);
-	akcipher_request_complete(req, err);
+	akcipher_request_complete(req, ecode);
 }
 static void rsa_priv_f3_done(struct device *dev, u32 *desc, u32 err,
@@ -161,9 +164,10 @@ static void rsa_priv_f3_done(struct device *dev, u32 *desc, u32 err,
 {
 	struct akcipher_request *req = context;
 	struct rsa_edesc *edesc;
+	int ecode = 0;
 	if (err)
-		caam_jr_strstatus(dev, err);
+		ecode = caam_jr_strstatus(dev, err);
 	edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);
@@ -171,7 +175,7 @@ static void rsa_priv_f3_done(struct device *dev, u32 *desc, u32 err,
 	rsa_io_unmap(dev, edesc, req);
 	kfree(edesc);
-	akcipher_request_complete(req, err);
+	akcipher_request_complete(req, ecode);
 }
 /**
......
@@ -211,7 +211,7 @@ static const char * const rng_err_id_list[] = {
 	"Secure key generation",
 };
-static void report_ccb_status(struct device *jrdev, const u32 status,
+static int report_ccb_status(struct device *jrdev, const u32 status,
 			      const char *error)
 {
 	u8 cha_id = (status & JRSTA_CCBERR_CHAID_MASK) >>
@@ -248,21 +248,26 @@ static void report_ccb_status(struct device *jrdev, const u32 status,
 	 * CCB ICV check failures are part of normal operation life;
 	 * we leave the upper layers to do what they want with them.
	 */
-	if (err_id != JRSTA_CCBERR_ERRID_ICVCHK)
-		dev_err(jrdev, "%08x: %s: %s %d: %s%s: %s%s\n",
-			status, error, idx_str, idx,
-			cha_str, cha_err_code,
-			err_str, err_err_code);
+	if (err_id == JRSTA_CCBERR_ERRID_ICVCHK)
+		return -EBADMSG;
+	dev_err_ratelimited(jrdev, "%08x: %s: %s %d: %s%s: %s%s\n", status,
+			    error, idx_str, idx, cha_str, cha_err_code,
+			    err_str, err_err_code);
+	return -EINVAL;
 }
-static void report_jump_status(struct device *jrdev, const u32 status,
+static int report_jump_status(struct device *jrdev, const u32 status,
 			       const char *error)
 {
 	dev_err(jrdev, "%08x: %s: %s() not implemented\n",
 		status, error, __func__);
+	return -EINVAL;
 }
-static void report_deco_status(struct device *jrdev, const u32 status,
+static int report_deco_status(struct device *jrdev, const u32 status,
 			       const char *error)
 {
 	u8 err_id = status & JRSTA_DECOERR_ERROR_MASK;
@@ -289,9 +294,11 @@ static void report_deco_status(struct device *jrdev, const u32 status,
 	dev_err(jrdev, "%08x: %s: %s %d: %s%s\n",
 		status, error, idx_str, idx, err_str, err_err_code);
+	return -EINVAL;
 }
-static void report_qi_status(struct device *qidev, const u32 status,
+static int report_qi_status(struct device *qidev, const u32 status,
 			     const char *error)
 {
 	u8 err_id = status & JRSTA_QIERR_ERROR_MASK;
@@ -310,26 +317,32 @@ static void report_qi_status(struct device *qidev, const u32 status,
 	dev_err(qidev, "%08x: %s: %s%s\n",
 		status, error, err_str, err_err_code);
+	return -EINVAL;
 }
-static void report_jr_status(struct device *jrdev, const u32 status,
+static int report_jr_status(struct device *jrdev, const u32 status,
 			     const char *error)
 {
 	dev_err(jrdev, "%08x: %s: %s() not implemented\n",
 		status, error, __func__);
+	return -EINVAL;
 }
-static void report_cond_code_status(struct device *jrdev, const u32 status,
+static int report_cond_code_status(struct device *jrdev, const u32 status,
 				    const char *error)
 {
 	dev_err(jrdev, "%08x: %s: %s() not implemented\n",
 		status, error, __func__);
+	return -EINVAL;
 }
-void caam_strstatus(struct device *jrdev, u32 status, bool qi_v2)
+int caam_strstatus(struct device *jrdev, u32 status, bool qi_v2)
 {
 	static const struct stat_src {
-		void (*report_ssed)(struct device *jrdev, const u32 status,
+		int (*report_ssed)(struct device *jrdev, const u32 status,
 				   const char *error);
 		const char *error;
 	} status_src[16] = {
@@ -358,11 +371,14 @@ void caam_strstatus(struct device *jrdev, u32 status, bool qi_v2)
 	 * Otherwise print the error source name.
	 */
	if (status_src[ssrc].report_ssed)
-		status_src[ssrc].report_ssed(jrdev, status, error);
-	else if (error)
+		return status_src[ssrc].report_ssed(jrdev, status, error);
+	if (error)
 		dev_err(jrdev, "%d: %s\n", ssrc, error);
	else
 		dev_err(jrdev, "%d: unknown error source\n", ssrc);
+	return -EINVAL;
 }
 EXPORT_SYMBOL(caam_strstatus);
......
@@ -12,7 +12,7 @@
 #define CAAM_ERROR_STR_MAX 302
-void caam_strstatus(struct device *dev, u32 status, bool qi_v2);
+int caam_strstatus(struct device *dev, u32 status, bool qi_v2);
 #define caam_jr_strstatus(jrdev, status) caam_strstatus(jrdev, status, false)
 #define caam_qi2_strstatus(qidev, status) caam_strstatus(qidev, status, true)
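With caam_strstatus() returning an int, the caam_jr_strstatus()/caam_qi2_strstatus() wrappers above now propagate a valid errno, and every completion callback in this patch collapses to the same pattern. A hedged usage sketch follows (example_done() is a hypothetical name; the body mirrors the aead_encrypt_done() hunk earlier in this diff):

static void example_done(struct device *jrdev, u32 *desc, u32 err,
			 void *context)
{
	struct aead_request *req = context;
	int ecode = 0;

	if (err)
		/* -EBADMSG on ICV check failure, -EINVAL otherwise */
		ecode = caam_jr_strstatus(jrdev, err);

	/* ... unmap DMA and free the extended descriptor ... */

	aead_request_complete(req, ecode);
}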
......
@@ -15,13 +15,14 @@ void split_key_done(struct device *dev, u32 *desc, u32 err,
 		    void *context)
 {
 	struct split_key_result *res = context;
+	int ecode = 0;
 	dev_dbg(dev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
 	if (err)
-		caam_jr_strstatus(dev, err);
+		ecode = caam_jr_strstatus(dev, err);
-	res->err = err;
+	res->err = ecode;
 	complete(&res->completion);
 }
......
@@ -577,7 +577,8 @@ static enum qman_cb_dqrr_result caam_rsp_fq_dqrr_cb(struct qman_portal *p,
 		if (ssrc != JRSTA_SSRC_CCB_ERROR ||
 		    err_id != JRSTA_CCBERR_ERRID_ICVCHK)
-			dev_err(qidev, "Error: %#x in CAAM response FD\n",
-				status);
+			dev_err_ratelimited(qidev,
					    "Error: %#x in CAAM response FD\n",
+					    status);
 	}
......