Commit 7e0880b9 authored by Horia Geantă's avatar Horia Geantă Committed by Herbert Xu

crypto: caam - add Derived Key Protocol (DKP) support

Offload split key generation in CAAM engine, using DKP.
DKP is supported starting with Era 6.

Note that the way assoclen is transmitted from the job descriptor
to the shared descriptor changes - DPOVRD register is used instead
of MATH3 (where available), since DKP protocol thrashes the MATH
registers.

The replacement of MDHA split key generation with DKP has the side
effect of the crypto engine writing the authentication key, and thus
the DMA mapping direction for the buffer holding the key has to change
from DMA_TO_DEVICE to DMA_BIDIRECTIONAL.
There are two cases:
-key is inlined in descriptor - descriptor buffer mapping changes
-key is referenced - key buffer mapping changes
Signed-off-by: Horia Geantă <horia.geanta@nxp.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent 9fe712df
...@@ -108,6 +108,7 @@ struct caam_ctx { ...@@ -108,6 +108,7 @@ struct caam_ctx {
dma_addr_t sh_desc_dec_dma; dma_addr_t sh_desc_dec_dma;
dma_addr_t sh_desc_givenc_dma; dma_addr_t sh_desc_givenc_dma;
dma_addr_t key_dma; dma_addr_t key_dma;
enum dma_data_direction dir;
struct device *jrdev; struct device *jrdev;
struct alginfo adata; struct alginfo adata;
struct alginfo cdata; struct alginfo cdata;
...@@ -118,6 +119,7 @@ static int aead_null_set_sh_desc(struct crypto_aead *aead) ...@@ -118,6 +119,7 @@ static int aead_null_set_sh_desc(struct crypto_aead *aead)
{ {
struct caam_ctx *ctx = crypto_aead_ctx(aead); struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev; struct device *jrdev = ctx->jrdev;
struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
u32 *desc; u32 *desc;
int rem_bytes = CAAM_DESC_BYTES_MAX - AEAD_DESC_JOB_IO_LEN - int rem_bytes = CAAM_DESC_BYTES_MAX - AEAD_DESC_JOB_IO_LEN -
ctx->adata.keylen_pad; ctx->adata.keylen_pad;
...@@ -136,9 +138,10 @@ static int aead_null_set_sh_desc(struct crypto_aead *aead) ...@@ -136,9 +138,10 @@ static int aead_null_set_sh_desc(struct crypto_aead *aead)
/* aead_encrypt shared descriptor */ /* aead_encrypt shared descriptor */
desc = ctx->sh_desc_enc; desc = ctx->sh_desc_enc;
cnstr_shdsc_aead_null_encap(desc, &ctx->adata, ctx->authsize); cnstr_shdsc_aead_null_encap(desc, &ctx->adata, ctx->authsize,
ctrlpriv->era);
dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma, dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
desc_bytes(desc), DMA_TO_DEVICE); desc_bytes(desc), ctx->dir);
/* /*
* Job Descriptor and Shared Descriptors * Job Descriptor and Shared Descriptors
...@@ -154,9 +157,10 @@ static int aead_null_set_sh_desc(struct crypto_aead *aead) ...@@ -154,9 +157,10 @@ static int aead_null_set_sh_desc(struct crypto_aead *aead)
/* aead_decrypt shared descriptor */ /* aead_decrypt shared descriptor */
desc = ctx->sh_desc_dec; desc = ctx->sh_desc_dec;
cnstr_shdsc_aead_null_decap(desc, &ctx->adata, ctx->authsize); cnstr_shdsc_aead_null_decap(desc, &ctx->adata, ctx->authsize,
ctrlpriv->era);
dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma, dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
desc_bytes(desc), DMA_TO_DEVICE); desc_bytes(desc), ctx->dir);
return 0; return 0;
} }
...@@ -168,6 +172,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead) ...@@ -168,6 +172,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
unsigned int ivsize = crypto_aead_ivsize(aead); unsigned int ivsize = crypto_aead_ivsize(aead);
struct caam_ctx *ctx = crypto_aead_ctx(aead); struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev; struct device *jrdev = ctx->jrdev;
struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
u32 ctx1_iv_off = 0; u32 ctx1_iv_off = 0;
u32 *desc, *nonce = NULL; u32 *desc, *nonce = NULL;
u32 inl_mask; u32 inl_mask;
...@@ -234,9 +239,9 @@ static int aead_set_sh_desc(struct crypto_aead *aead) ...@@ -234,9 +239,9 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
desc = ctx->sh_desc_enc; desc = ctx->sh_desc_enc;
cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata, ivsize, cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata, ivsize,
ctx->authsize, is_rfc3686, nonce, ctx1_iv_off, ctx->authsize, is_rfc3686, nonce, ctx1_iv_off,
false); false, ctrlpriv->era);
dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma, dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
desc_bytes(desc), DMA_TO_DEVICE); desc_bytes(desc), ctx->dir);
skip_enc: skip_enc:
/* /*
...@@ -266,9 +271,9 @@ static int aead_set_sh_desc(struct crypto_aead *aead) ...@@ -266,9 +271,9 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
desc = ctx->sh_desc_dec; desc = ctx->sh_desc_dec;
cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata, ivsize, cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata, ivsize,
ctx->authsize, alg->caam.geniv, is_rfc3686, ctx->authsize, alg->caam.geniv, is_rfc3686,
nonce, ctx1_iv_off, false); nonce, ctx1_iv_off, false, ctrlpriv->era);
dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma, dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
desc_bytes(desc), DMA_TO_DEVICE); desc_bytes(desc), ctx->dir);
if (!alg->caam.geniv) if (!alg->caam.geniv)
goto skip_givenc; goto skip_givenc;
...@@ -300,9 +305,9 @@ static int aead_set_sh_desc(struct crypto_aead *aead) ...@@ -300,9 +305,9 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
desc = ctx->sh_desc_enc; desc = ctx->sh_desc_enc;
cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata, ivsize, cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata, ivsize,
ctx->authsize, is_rfc3686, nonce, ctx->authsize, is_rfc3686, nonce,
ctx1_iv_off, false); ctx1_iv_off, false, ctrlpriv->era);
dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma, dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
desc_bytes(desc), DMA_TO_DEVICE); desc_bytes(desc), ctx->dir);
skip_givenc: skip_givenc:
return 0; return 0;
...@@ -346,7 +351,7 @@ static int gcm_set_sh_desc(struct crypto_aead *aead) ...@@ -346,7 +351,7 @@ static int gcm_set_sh_desc(struct crypto_aead *aead)
desc = ctx->sh_desc_enc; desc = ctx->sh_desc_enc;
cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ctx->authsize); cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ctx->authsize);
dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma, dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
desc_bytes(desc), DMA_TO_DEVICE); desc_bytes(desc), ctx->dir);
/* /*
* Job Descriptor and Shared Descriptors * Job Descriptor and Shared Descriptors
...@@ -363,7 +368,7 @@ static int gcm_set_sh_desc(struct crypto_aead *aead) ...@@ -363,7 +368,7 @@ static int gcm_set_sh_desc(struct crypto_aead *aead)
desc = ctx->sh_desc_dec; desc = ctx->sh_desc_dec;
cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ctx->authsize); cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ctx->authsize);
dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma, dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
desc_bytes(desc), DMA_TO_DEVICE); desc_bytes(desc), ctx->dir);
return 0; return 0;
} }
...@@ -405,7 +410,7 @@ static int rfc4106_set_sh_desc(struct crypto_aead *aead) ...@@ -405,7 +410,7 @@ static int rfc4106_set_sh_desc(struct crypto_aead *aead)
desc = ctx->sh_desc_enc; desc = ctx->sh_desc_enc;
cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ctx->authsize); cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ctx->authsize);
dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma, dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
desc_bytes(desc), DMA_TO_DEVICE); desc_bytes(desc), ctx->dir);
/* /*
* Job Descriptor and Shared Descriptors * Job Descriptor and Shared Descriptors
...@@ -422,7 +427,7 @@ static int rfc4106_set_sh_desc(struct crypto_aead *aead) ...@@ -422,7 +427,7 @@ static int rfc4106_set_sh_desc(struct crypto_aead *aead)
desc = ctx->sh_desc_dec; desc = ctx->sh_desc_dec;
cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ctx->authsize); cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ctx->authsize);
dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma, dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
desc_bytes(desc), DMA_TO_DEVICE); desc_bytes(desc), ctx->dir);
return 0; return 0;
} }
...@@ -465,7 +470,7 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead) ...@@ -465,7 +470,7 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead)
desc = ctx->sh_desc_enc; desc = ctx->sh_desc_enc;
cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ctx->authsize); cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ctx->authsize);
dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma, dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
desc_bytes(desc), DMA_TO_DEVICE); desc_bytes(desc), ctx->dir);
/* /*
* Job Descriptor and Shared Descriptors * Job Descriptor and Shared Descriptors
...@@ -482,7 +487,7 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead) ...@@ -482,7 +487,7 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead)
desc = ctx->sh_desc_dec; desc = ctx->sh_desc_dec;
cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ctx->authsize); cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ctx->authsize);
dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma, dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
desc_bytes(desc), DMA_TO_DEVICE); desc_bytes(desc), ctx->dir);
return 0; return 0;
} }
...@@ -503,6 +508,7 @@ static int aead_setkey(struct crypto_aead *aead, ...@@ -503,6 +508,7 @@ static int aead_setkey(struct crypto_aead *aead,
{ {
struct caam_ctx *ctx = crypto_aead_ctx(aead); struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev; struct device *jrdev = ctx->jrdev;
struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
struct crypto_authenc_keys keys; struct crypto_authenc_keys keys;
int ret = 0; int ret = 0;
...@@ -517,6 +523,27 @@ static int aead_setkey(struct crypto_aead *aead, ...@@ -517,6 +523,27 @@ static int aead_setkey(struct crypto_aead *aead,
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif #endif
/*
* If DKP is supported, use it in the shared descriptor to generate
* the split key.
*/
if (ctrlpriv->era >= 6) {
ctx->adata.keylen = keys.authkeylen;
ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
OP_ALG_ALGSEL_MASK);
if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
goto badkey;
memcpy(ctx->key, keys.authkey, keys.authkeylen);
memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey,
keys.enckeylen);
dma_sync_single_for_device(jrdev, ctx->key_dma,
ctx->adata.keylen_pad +
keys.enckeylen, ctx->dir);
goto skip_split_key;
}
ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, keys.authkey, ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, keys.authkey,
keys.authkeylen, CAAM_MAX_KEY_SIZE - keys.authkeylen, CAAM_MAX_KEY_SIZE -
keys.enckeylen); keys.enckeylen);
...@@ -527,12 +554,14 @@ static int aead_setkey(struct crypto_aead *aead, ...@@ -527,12 +554,14 @@ static int aead_setkey(struct crypto_aead *aead,
/* postpend encryption key to auth split key */ /* postpend encryption key to auth split key */
memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen); memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad + dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
keys.enckeylen, DMA_TO_DEVICE); keys.enckeylen, ctx->dir);
#ifdef DEBUG #ifdef DEBUG
print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ", print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
ctx->adata.keylen_pad + keys.enckeylen, 1); ctx->adata.keylen_pad + keys.enckeylen, 1);
#endif #endif
skip_split_key:
ctx->cdata.keylen = keys.enckeylen; ctx->cdata.keylen = keys.enckeylen;
return aead_set_sh_desc(aead); return aead_set_sh_desc(aead);
badkey: badkey:
...@@ -552,7 +581,7 @@ static int gcm_setkey(struct crypto_aead *aead, ...@@ -552,7 +581,7 @@ static int gcm_setkey(struct crypto_aead *aead,
#endif #endif
memcpy(ctx->key, key, keylen); memcpy(ctx->key, key, keylen);
dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE); dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, ctx->dir);
ctx->cdata.keylen = keylen; ctx->cdata.keylen = keylen;
return gcm_set_sh_desc(aead); return gcm_set_sh_desc(aead);
...@@ -580,7 +609,7 @@ static int rfc4106_setkey(struct crypto_aead *aead, ...@@ -580,7 +609,7 @@ static int rfc4106_setkey(struct crypto_aead *aead,
*/ */
ctx->cdata.keylen = keylen - 4; ctx->cdata.keylen = keylen - 4;
dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen, dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
DMA_TO_DEVICE); ctx->dir);
return rfc4106_set_sh_desc(aead); return rfc4106_set_sh_desc(aead);
} }
...@@ -606,7 +635,7 @@ static int rfc4543_setkey(struct crypto_aead *aead, ...@@ -606,7 +635,7 @@ static int rfc4543_setkey(struct crypto_aead *aead,
*/ */
ctx->cdata.keylen = keylen - 4; ctx->cdata.keylen = keylen - 4;
dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen, dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
DMA_TO_DEVICE); ctx->dir);
return rfc4543_set_sh_desc(aead); return rfc4543_set_sh_desc(aead);
} }
...@@ -656,21 +685,21 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher, ...@@ -656,21 +685,21 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
cnstr_shdsc_ablkcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686, cnstr_shdsc_ablkcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686,
ctx1_iv_off); ctx1_iv_off);
dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma, dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
desc_bytes(desc), DMA_TO_DEVICE); desc_bytes(desc), ctx->dir);
/* ablkcipher_decrypt shared descriptor */ /* ablkcipher_decrypt shared descriptor */
desc = ctx->sh_desc_dec; desc = ctx->sh_desc_dec;
cnstr_shdsc_ablkcipher_decap(desc, &ctx->cdata, ivsize, is_rfc3686, cnstr_shdsc_ablkcipher_decap(desc, &ctx->cdata, ivsize, is_rfc3686,
ctx1_iv_off); ctx1_iv_off);
dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma, dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
desc_bytes(desc), DMA_TO_DEVICE); desc_bytes(desc), ctx->dir);
/* ablkcipher_givencrypt shared descriptor */ /* ablkcipher_givencrypt shared descriptor */
desc = ctx->sh_desc_givenc; desc = ctx->sh_desc_givenc;
cnstr_shdsc_ablkcipher_givencap(desc, &ctx->cdata, ivsize, is_rfc3686, cnstr_shdsc_ablkcipher_givencap(desc, &ctx->cdata, ivsize, is_rfc3686,
ctx1_iv_off); ctx1_iv_off);
dma_sync_single_for_device(jrdev, ctx->sh_desc_givenc_dma, dma_sync_single_for_device(jrdev, ctx->sh_desc_givenc_dma,
desc_bytes(desc), DMA_TO_DEVICE); desc_bytes(desc), ctx->dir);
return 0; return 0;
} }
...@@ -697,13 +726,13 @@ static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher, ...@@ -697,13 +726,13 @@ static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
desc = ctx->sh_desc_enc; desc = ctx->sh_desc_enc;
cnstr_shdsc_xts_ablkcipher_encap(desc, &ctx->cdata); cnstr_shdsc_xts_ablkcipher_encap(desc, &ctx->cdata);
dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma, dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
desc_bytes(desc), DMA_TO_DEVICE); desc_bytes(desc), ctx->dir);
/* xts_ablkcipher_decrypt shared descriptor */ /* xts_ablkcipher_decrypt shared descriptor */
desc = ctx->sh_desc_dec; desc = ctx->sh_desc_dec;
cnstr_shdsc_xts_ablkcipher_decap(desc, &ctx->cdata); cnstr_shdsc_xts_ablkcipher_decap(desc, &ctx->cdata);
dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma, dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
desc_bytes(desc), DMA_TO_DEVICE); desc_bytes(desc), ctx->dir);
return 0; return 0;
} }
...@@ -975,9 +1004,6 @@ static void init_aead_job(struct aead_request *req, ...@@ -975,9 +1004,6 @@ static void init_aead_job(struct aead_request *req,
append_seq_out_ptr(desc, dst_dma, append_seq_out_ptr(desc, dst_dma,
req->assoclen + req->cryptlen - authsize, req->assoclen + req->cryptlen - authsize,
out_options); out_options);
/* REG3 = assoclen */
append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen);
} }
static void init_gcm_job(struct aead_request *req, static void init_gcm_job(struct aead_request *req,
...@@ -992,6 +1018,7 @@ static void init_gcm_job(struct aead_request *req, ...@@ -992,6 +1018,7 @@ static void init_gcm_job(struct aead_request *req,
unsigned int last; unsigned int last;
init_aead_job(req, edesc, all_contig, encrypt); init_aead_job(req, edesc, all_contig, encrypt);
append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen);
/* BUG This should not be specific to generic GCM. */ /* BUG This should not be specific to generic GCM. */
last = 0; last = 0;
...@@ -1018,6 +1045,7 @@ static void init_authenc_job(struct aead_request *req, ...@@ -1018,6 +1045,7 @@ static void init_authenc_job(struct aead_request *req,
struct caam_aead_alg, aead); struct caam_aead_alg, aead);
unsigned int ivsize = crypto_aead_ivsize(aead); unsigned int ivsize = crypto_aead_ivsize(aead);
struct caam_ctx *ctx = crypto_aead_ctx(aead); struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) == const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
OP_ALG_AAI_CTR_MOD128); OP_ALG_AAI_CTR_MOD128);
const bool is_rfc3686 = alg->caam.rfc3686; const bool is_rfc3686 = alg->caam.rfc3686;
...@@ -1041,6 +1069,15 @@ static void init_authenc_job(struct aead_request *req, ...@@ -1041,6 +1069,15 @@ static void init_authenc_job(struct aead_request *req,
init_aead_job(req, edesc, all_contig, encrypt); init_aead_job(req, edesc, all_contig, encrypt);
/*
* {REG3, DPOVRD} = assoclen, depending on whether MATH command supports
* having DPOVRD as destination.
*/
if (ctrlpriv->era < 3)
append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen);
else
append_math_add_imm_u32(desc, DPOVRD, ZERO, IMM, req->assoclen);
if (ivsize && ((is_rfc3686 && encrypt) || !alg->caam.geniv)) if (ivsize && ((is_rfc3686 && encrypt) || !alg->caam.geniv))
append_load_as_imm(desc, req->iv, ivsize, append_load_as_imm(desc, req->iv, ivsize,
LDST_CLASS_1_CCB | LDST_CLASS_1_CCB |
...@@ -3224,9 +3261,11 @@ struct caam_crypto_alg { ...@@ -3224,9 +3261,11 @@ struct caam_crypto_alg {
struct caam_alg_entry caam; struct caam_alg_entry caam;
}; };
static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam) static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
bool uses_dkp)
{ {
dma_addr_t dma_addr; dma_addr_t dma_addr;
struct caam_drv_private *priv;
ctx->jrdev = caam_jr_alloc(); ctx->jrdev = caam_jr_alloc();
if (IS_ERR(ctx->jrdev)) { if (IS_ERR(ctx->jrdev)) {
...@@ -3234,10 +3273,16 @@ static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam) ...@@ -3234,10 +3273,16 @@ static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam)
return PTR_ERR(ctx->jrdev); return PTR_ERR(ctx->jrdev);
} }
priv = dev_get_drvdata(ctx->jrdev->parent);
if (priv->era >= 6 && uses_dkp)
ctx->dir = DMA_BIDIRECTIONAL;
else
ctx->dir = DMA_TO_DEVICE;
dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_enc, dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_enc,
offsetof(struct caam_ctx, offsetof(struct caam_ctx,
sh_desc_enc_dma), sh_desc_enc_dma),
DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC); ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
if (dma_mapping_error(ctx->jrdev, dma_addr)) { if (dma_mapping_error(ctx->jrdev, dma_addr)) {
dev_err(ctx->jrdev, "unable to map key, shared descriptors\n"); dev_err(ctx->jrdev, "unable to map key, shared descriptors\n");
caam_jr_free(ctx->jrdev); caam_jr_free(ctx->jrdev);
...@@ -3265,7 +3310,7 @@ static int caam_cra_init(struct crypto_tfm *tfm) ...@@ -3265,7 +3310,7 @@ static int caam_cra_init(struct crypto_tfm *tfm)
container_of(alg, struct caam_crypto_alg, crypto_alg); container_of(alg, struct caam_crypto_alg, crypto_alg);
struct caam_ctx *ctx = crypto_tfm_ctx(tfm); struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
return caam_init_common(ctx, &caam_alg->caam); return caam_init_common(ctx, &caam_alg->caam, false);
} }
static int caam_aead_init(struct crypto_aead *tfm) static int caam_aead_init(struct crypto_aead *tfm)
...@@ -3275,14 +3320,15 @@ static int caam_aead_init(struct crypto_aead *tfm) ...@@ -3275,14 +3320,15 @@ static int caam_aead_init(struct crypto_aead *tfm)
container_of(alg, struct caam_aead_alg, aead); container_of(alg, struct caam_aead_alg, aead);
struct caam_ctx *ctx = crypto_aead_ctx(tfm); struct caam_ctx *ctx = crypto_aead_ctx(tfm);
return caam_init_common(ctx, &caam_alg->caam); return caam_init_common(ctx, &caam_alg->caam,
alg->setkey == aead_setkey);
} }
static void caam_exit_common(struct caam_ctx *ctx) static void caam_exit_common(struct caam_ctx *ctx)
{ {
dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_enc_dma, dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_enc_dma,
offsetof(struct caam_ctx, sh_desc_enc_dma), offsetof(struct caam_ctx, sh_desc_enc_dma),
DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC); ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
caam_jr_free(ctx->jrdev); caam_jr_free(ctx->jrdev);
} }
......
...@@ -45,16 +45,16 @@ static inline void append_dec_op1(u32 *desc, u32 type) ...@@ -45,16 +45,16 @@ static inline void append_dec_op1(u32 *desc, u32 type)
* cnstr_shdsc_aead_null_encap - IPSec ESP encapsulation shared descriptor * cnstr_shdsc_aead_null_encap - IPSec ESP encapsulation shared descriptor
* (non-protocol) with no (null) encryption. * (non-protocol) with no (null) encryption.
* @desc: pointer to buffer used for descriptor construction * @desc: pointer to buffer used for descriptor construction
* @adata: pointer to authentication transform definitions. Note that since a * @adata: pointer to authentication transform definitions.
* split key is to be used, the size of the split key itself is * A split key is required for SEC Era < 6; the size of the split key
* specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1, * is specified in this case. Valid algorithm values - one of
* SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP. * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed
* with OP_ALG_AAI_HMAC_PRECOMP.
* @icvsize: integrity check value (ICV) size (truncated or full) * @icvsize: integrity check value (ICV) size (truncated or full)
* * @era: SEC Era
* Note: Requires an MDHA split key.
*/ */
void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata, void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata,
unsigned int icvsize) unsigned int icvsize, int era)
{ {
u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd; u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd;
...@@ -63,13 +63,18 @@ void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata, ...@@ -63,13 +63,18 @@ void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata,
/* Skip if already shared */ /* Skip if already shared */
key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
JUMP_COND_SHRD); JUMP_COND_SHRD);
if (era < 6) {
if (adata->key_inline) if (adata->key_inline)
append_key_as_imm(desc, adata->key_virt, adata->keylen_pad, append_key_as_imm(desc, adata->key_virt,
adata->keylen, CLASS_2 | KEY_DEST_MDHA_SPLIT | adata->keylen_pad, adata->keylen,
CLASS_2 | KEY_DEST_MDHA_SPLIT |
KEY_ENC); KEY_ENC);
else else
append_key(desc, adata->key_dma, adata->keylen, CLASS_2 | append_key(desc, adata->key_dma, adata->keylen,
KEY_DEST_MDHA_SPLIT | KEY_ENC); CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC);
} else {
append_proto_dkp(desc, adata);
}
set_jump_tgt_here(desc, key_jump_cmd); set_jump_tgt_here(desc, key_jump_cmd);
/* assoclen + cryptlen = seqinlen */ /* assoclen + cryptlen = seqinlen */
...@@ -121,16 +126,16 @@ EXPORT_SYMBOL(cnstr_shdsc_aead_null_encap); ...@@ -121,16 +126,16 @@ EXPORT_SYMBOL(cnstr_shdsc_aead_null_encap);
* cnstr_shdsc_aead_null_decap - IPSec ESP decapsulation shared descriptor * cnstr_shdsc_aead_null_decap - IPSec ESP decapsulation shared descriptor
* (non-protocol) with no (null) decryption. * (non-protocol) with no (null) decryption.
* @desc: pointer to buffer used for descriptor construction * @desc: pointer to buffer used for descriptor construction
* @adata: pointer to authentication transform definitions. Note that since a * @adata: pointer to authentication transform definitions.
* split key is to be used, the size of the split key itself is * A split key is required for SEC Era < 6; the size of the split key
* specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1, * is specified in this case. Valid algorithm values - one of
* SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP. * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed
* with OP_ALG_AAI_HMAC_PRECOMP.
* @icvsize: integrity check value (ICV) size (truncated or full) * @icvsize: integrity check value (ICV) size (truncated or full)
* * @era: SEC Era
* Note: Requires an MDHA split key.
*/ */
void cnstr_shdsc_aead_null_decap(u32 * const desc, struct alginfo *adata, void cnstr_shdsc_aead_null_decap(u32 * const desc, struct alginfo *adata,
unsigned int icvsize) unsigned int icvsize, int era)
{ {
u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd, *jump_cmd; u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd, *jump_cmd;
...@@ -139,13 +144,18 @@ void cnstr_shdsc_aead_null_decap(u32 * const desc, struct alginfo *adata, ...@@ -139,13 +144,18 @@ void cnstr_shdsc_aead_null_decap(u32 * const desc, struct alginfo *adata,
/* Skip if already shared */ /* Skip if already shared */
key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
JUMP_COND_SHRD); JUMP_COND_SHRD);
if (era < 6) {
if (adata->key_inline) if (adata->key_inline)
append_key_as_imm(desc, adata->key_virt, adata->keylen_pad, append_key_as_imm(desc, adata->key_virt,
adata->keylen, CLASS_2 | adata->keylen_pad, adata->keylen,
KEY_DEST_MDHA_SPLIT | KEY_ENC); CLASS_2 | KEY_DEST_MDHA_SPLIT |
KEY_ENC);
else else
append_key(desc, adata->key_dma, adata->keylen, CLASS_2 | append_key(desc, adata->key_dma, adata->keylen,
KEY_DEST_MDHA_SPLIT | KEY_ENC); CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC);
} else {
append_proto_dkp(desc, adata);
}
set_jump_tgt_here(desc, key_jump_cmd); set_jump_tgt_here(desc, key_jump_cmd);
/* Class 2 operation */ /* Class 2 operation */
...@@ -204,7 +214,7 @@ EXPORT_SYMBOL(cnstr_shdsc_aead_null_decap); ...@@ -204,7 +214,7 @@ EXPORT_SYMBOL(cnstr_shdsc_aead_null_decap);
static void init_sh_desc_key_aead(u32 * const desc, static void init_sh_desc_key_aead(u32 * const desc,
struct alginfo * const cdata, struct alginfo * const cdata,
struct alginfo * const adata, struct alginfo * const adata,
const bool is_rfc3686, u32 *nonce) const bool is_rfc3686, u32 *nonce, int era)
{ {
u32 *key_jump_cmd; u32 *key_jump_cmd;
unsigned int enckeylen = cdata->keylen; unsigned int enckeylen = cdata->keylen;
...@@ -224,13 +234,18 @@ static void init_sh_desc_key_aead(u32 * const desc, ...@@ -224,13 +234,18 @@ static void init_sh_desc_key_aead(u32 * const desc,
if (is_rfc3686) if (is_rfc3686)
enckeylen -= CTR_RFC3686_NONCE_SIZE; enckeylen -= CTR_RFC3686_NONCE_SIZE;
if (era < 6) {
if (adata->key_inline) if (adata->key_inline)
append_key_as_imm(desc, adata->key_virt, adata->keylen_pad, append_key_as_imm(desc, adata->key_virt,
adata->keylen, CLASS_2 | adata->keylen_pad, adata->keylen,
KEY_DEST_MDHA_SPLIT | KEY_ENC); CLASS_2 | KEY_DEST_MDHA_SPLIT |
KEY_ENC);
else else
append_key(desc, adata->key_dma, adata->keylen, CLASS_2 | append_key(desc, adata->key_dma, adata->keylen,
KEY_DEST_MDHA_SPLIT | KEY_ENC); CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC);
} else {
append_proto_dkp(desc, adata);
}
if (cdata->key_inline) if (cdata->key_inline)
append_key_as_imm(desc, cdata->key_virt, enckeylen, append_key_as_imm(desc, cdata->key_virt, enckeylen,
...@@ -261,26 +276,27 @@ static void init_sh_desc_key_aead(u32 * const desc, ...@@ -261,26 +276,27 @@ static void init_sh_desc_key_aead(u32 * const desc,
* @cdata: pointer to block cipher transform definitions * @cdata: pointer to block cipher transform definitions
* Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
* with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128. * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
* @adata: pointer to authentication transform definitions. Note that since a * @adata: pointer to authentication transform definitions.
* split key is to be used, the size of the split key itself is * A split key is required for SEC Era < 6; the size of the split key
* specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1, * is specified in this case. Valid algorithm values - one of
* SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP. * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed
* with OP_ALG_AAI_HMAC_PRECOMP.
* @ivsize: initialization vector size * @ivsize: initialization vector size
* @icvsize: integrity check value (ICV) size (truncated or full) * @icvsize: integrity check value (ICV) size (truncated or full)
* @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
* @nonce: pointer to rfc3686 nonce * @nonce: pointer to rfc3686 nonce
* @ctx1_iv_off: IV offset in CONTEXT1 register * @ctx1_iv_off: IV offset in CONTEXT1 register
* @is_qi: true when called from caam/qi * @is_qi: true when called from caam/qi
* * @era: SEC Era
* Note: Requires an MDHA split key.
*/ */
void cnstr_shdsc_aead_encap(u32 * const desc, struct alginfo *cdata, void cnstr_shdsc_aead_encap(u32 * const desc, struct alginfo *cdata,
struct alginfo *adata, unsigned int ivsize, struct alginfo *adata, unsigned int ivsize,
unsigned int icvsize, const bool is_rfc3686, unsigned int icvsize, const bool is_rfc3686,
u32 *nonce, const u32 ctx1_iv_off, const bool is_qi) u32 *nonce, const u32 ctx1_iv_off, const bool is_qi,
int era)
{ {
/* Note: Context registers are saved. */ /* Note: Context registers are saved. */
init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce); init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce, era);
/* Class 2 operation */ /* Class 2 operation */
append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL | append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
...@@ -306,8 +322,13 @@ void cnstr_shdsc_aead_encap(u32 * const desc, struct alginfo *cdata, ...@@ -306,8 +322,13 @@ void cnstr_shdsc_aead_encap(u32 * const desc, struct alginfo *cdata,
} }
/* Read and write assoclen bytes */ /* Read and write assoclen bytes */
if (is_qi || era < 3) {
append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
} else {
append_math_add(desc, VARSEQINLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
append_math_add(desc, VARSEQOUTLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
}
/* Skip assoc data */ /* Skip assoc data */
append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF); append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
...@@ -350,27 +371,27 @@ EXPORT_SYMBOL(cnstr_shdsc_aead_encap); ...@@ -350,27 +371,27 @@ EXPORT_SYMBOL(cnstr_shdsc_aead_encap);
* @cdata: pointer to block cipher transform definitions * @cdata: pointer to block cipher transform definitions
* Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
* with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128. * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
* @adata: pointer to authentication transform definitions. Note that since a * @adata: pointer to authentication transform definitions.
* split key is to be used, the size of the split key itself is * A split key is required for SEC Era < 6; the size of the split key
* specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1, * is specified in this case. Valid algorithm values - one of
* SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP. * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed
* with OP_ALG_AAI_HMAC_PRECOMP.
* @ivsize: initialization vector size * @ivsize: initialization vector size
* @icvsize: integrity check value (ICV) size (truncated or full) * @icvsize: integrity check value (ICV) size (truncated or full)
* @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
* @nonce: pointer to rfc3686 nonce * @nonce: pointer to rfc3686 nonce
* @ctx1_iv_off: IV offset in CONTEXT1 register * @ctx1_iv_off: IV offset in CONTEXT1 register
* @is_qi: true when called from caam/qi * @is_qi: true when called from caam/qi
* * @era: SEC Era
* Note: Requires an MDHA split key.
*/ */
void cnstr_shdsc_aead_decap(u32 * const desc, struct alginfo *cdata, void cnstr_shdsc_aead_decap(u32 * const desc, struct alginfo *cdata,
struct alginfo *adata, unsigned int ivsize, struct alginfo *adata, unsigned int ivsize,
unsigned int icvsize, const bool geniv, unsigned int icvsize, const bool geniv,
const bool is_rfc3686, u32 *nonce, const bool is_rfc3686, u32 *nonce,
const u32 ctx1_iv_off, const bool is_qi) const u32 ctx1_iv_off, const bool is_qi, int era)
{ {
/* Note: Context registers are saved. */ /* Note: Context registers are saved. */
init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce); init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce, era);
/* Class 2 operation */ /* Class 2 operation */
append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL | append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
...@@ -397,11 +418,23 @@ void cnstr_shdsc_aead_decap(u32 * const desc, struct alginfo *cdata, ...@@ -397,11 +418,23 @@ void cnstr_shdsc_aead_decap(u32 * const desc, struct alginfo *cdata,
} }
/* Read and write assoclen bytes */ /* Read and write assoclen bytes */
if (is_qi || era < 3) {
append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
if (geniv) if (geniv)
append_math_add_imm_u32(desc, VARSEQOUTLEN, REG3, IMM, ivsize); append_math_add_imm_u32(desc, VARSEQOUTLEN, REG3, IMM,
ivsize);
else else
append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); append_math_add(desc, VARSEQOUTLEN, ZERO, REG3,
CAAM_CMD_SZ);
} else {
append_math_add(desc, VARSEQINLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
if (geniv)
append_math_add_imm_u32(desc, VARSEQOUTLEN, DPOVRD, IMM,
ivsize);
else
append_math_add(desc, VARSEQOUTLEN, ZERO, DPOVRD,
CAAM_CMD_SZ);
}
/* Skip assoc data */ /* Skip assoc data */
append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF); append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
...@@ -456,29 +489,29 @@ EXPORT_SYMBOL(cnstr_shdsc_aead_decap); ...@@ -456,29 +489,29 @@ EXPORT_SYMBOL(cnstr_shdsc_aead_decap);
* @cdata: pointer to block cipher transform definitions * @cdata: pointer to block cipher transform definitions
* Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
* with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128. * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
* @adata: pointer to authentication transform definitions. Note that since a * @adata: pointer to authentication transform definitions.
* split key is to be used, the size of the split key itself is * A split key is required for SEC Era < 6; the size of the split key
* specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1, * is specified in this case. Valid algorithm values - one of
* SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP. * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed
* with OP_ALG_AAI_HMAC_PRECOMP.
* @ivsize: initialization vector size * @ivsize: initialization vector size
* @icvsize: integrity check value (ICV) size (truncated or full) * @icvsize: integrity check value (ICV) size (truncated or full)
* @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
* @nonce: pointer to rfc3686 nonce * @nonce: pointer to rfc3686 nonce
* @ctx1_iv_off: IV offset in CONTEXT1 register * @ctx1_iv_off: IV offset in CONTEXT1 register
* @is_qi: true when called from caam/qi * @is_qi: true when called from caam/qi
* * @era: SEC Era
* Note: Requires an MDHA split key.
*/ */
void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata, void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata,
struct alginfo *adata, unsigned int ivsize, struct alginfo *adata, unsigned int ivsize,
unsigned int icvsize, const bool is_rfc3686, unsigned int icvsize, const bool is_rfc3686,
u32 *nonce, const u32 ctx1_iv_off, u32 *nonce, const u32 ctx1_iv_off,
const bool is_qi) const bool is_qi, int era)
{ {
u32 geniv, moveiv; u32 geniv, moveiv;
/* Note: Context registers are saved. */ /* Note: Context registers are saved. */
init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce); init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce, era);
if (is_qi) { if (is_qi) {
u32 *wait_load_cmd; u32 *wait_load_cmd;
...@@ -528,8 +561,13 @@ void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata, ...@@ -528,8 +561,13 @@ void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata,
OP_ALG_ENCRYPT); OP_ALG_ENCRYPT);
/* Read and write assoclen bytes */ /* Read and write assoclen bytes */
if (is_qi || era < 3) {
append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
} else {
append_math_add(desc, VARSEQINLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
append_math_add(desc, VARSEQOUTLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
}
/* Skip assoc data */ /* Skip assoc data */
append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF); append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
......
...@@ -43,28 +43,28 @@ ...@@ -43,28 +43,28 @@
15 * CAAM_CMD_SZ) 15 * CAAM_CMD_SZ)
void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata, void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata,
unsigned int icvsize); unsigned int icvsize, int era);
void cnstr_shdsc_aead_null_decap(u32 * const desc, struct alginfo *adata, void cnstr_shdsc_aead_null_decap(u32 * const desc, struct alginfo *adata,
unsigned int icvsize); unsigned int icvsize, int era);
void cnstr_shdsc_aead_encap(u32 * const desc, struct alginfo *cdata, void cnstr_shdsc_aead_encap(u32 * const desc, struct alginfo *cdata,
struct alginfo *adata, unsigned int ivsize, struct alginfo *adata, unsigned int ivsize,
unsigned int icvsize, const bool is_rfc3686, unsigned int icvsize, const bool is_rfc3686,
u32 *nonce, const u32 ctx1_iv_off, u32 *nonce, const u32 ctx1_iv_off,
const bool is_qi); const bool is_qi, int era);
void cnstr_shdsc_aead_decap(u32 * const desc, struct alginfo *cdata, void cnstr_shdsc_aead_decap(u32 * const desc, struct alginfo *cdata,
struct alginfo *adata, unsigned int ivsize, struct alginfo *adata, unsigned int ivsize,
unsigned int icvsize, const bool geniv, unsigned int icvsize, const bool geniv,
const bool is_rfc3686, u32 *nonce, const bool is_rfc3686, u32 *nonce,
const u32 ctx1_iv_off, const bool is_qi); const u32 ctx1_iv_off, const bool is_qi, int era);
void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata, void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata,
struct alginfo *adata, unsigned int ivsize, struct alginfo *adata, unsigned int ivsize,
unsigned int icvsize, const bool is_rfc3686, unsigned int icvsize, const bool is_rfc3686,
u32 *nonce, const u32 ctx1_iv_off, u32 *nonce, const u32 ctx1_iv_off,
const bool is_qi); const bool is_qi, int era);
void cnstr_shdsc_gcm_encap(u32 * const desc, struct alginfo *cdata, void cnstr_shdsc_gcm_encap(u32 * const desc, struct alginfo *cdata,
unsigned int icvsize); unsigned int icvsize);
......
...@@ -53,6 +53,7 @@ struct caam_ctx { ...@@ -53,6 +53,7 @@ struct caam_ctx {
u32 sh_desc_givenc[DESC_MAX_USED_LEN]; u32 sh_desc_givenc[DESC_MAX_USED_LEN];
u8 key[CAAM_MAX_KEY_SIZE]; u8 key[CAAM_MAX_KEY_SIZE];
dma_addr_t key_dma; dma_addr_t key_dma;
enum dma_data_direction dir;
struct alginfo adata; struct alginfo adata;
struct alginfo cdata; struct alginfo cdata;
unsigned int authsize; unsigned int authsize;
...@@ -74,6 +75,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead) ...@@ -74,6 +75,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) == const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
OP_ALG_AAI_CTR_MOD128); OP_ALG_AAI_CTR_MOD128);
const bool is_rfc3686 = alg->caam.rfc3686; const bool is_rfc3686 = alg->caam.rfc3686;
struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
if (!ctx->cdata.keylen || !ctx->authsize) if (!ctx->cdata.keylen || !ctx->authsize)
return 0; return 0;
...@@ -124,7 +126,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead) ...@@ -124,7 +126,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
cnstr_shdsc_aead_encap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata, cnstr_shdsc_aead_encap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
ivsize, ctx->authsize, is_rfc3686, nonce, ivsize, ctx->authsize, is_rfc3686, nonce,
ctx1_iv_off, true); ctx1_iv_off, true, ctrlpriv->era);
skip_enc: skip_enc:
/* aead_decrypt shared descriptor */ /* aead_decrypt shared descriptor */
...@@ -149,7 +151,8 @@ static int aead_set_sh_desc(struct crypto_aead *aead) ...@@ -149,7 +151,8 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
cnstr_shdsc_aead_decap(ctx->sh_desc_dec, &ctx->cdata, &ctx->adata, cnstr_shdsc_aead_decap(ctx->sh_desc_dec, &ctx->cdata, &ctx->adata,
ivsize, ctx->authsize, alg->caam.geniv, ivsize, ctx->authsize, alg->caam.geniv,
is_rfc3686, nonce, ctx1_iv_off, true); is_rfc3686, nonce, ctx1_iv_off, true,
ctrlpriv->era);
if (!alg->caam.geniv) if (!alg->caam.geniv)
goto skip_givenc; goto skip_givenc;
...@@ -176,7 +179,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead) ...@@ -176,7 +179,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
cnstr_shdsc_aead_givencap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata, cnstr_shdsc_aead_givencap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
ivsize, ctx->authsize, is_rfc3686, nonce, ivsize, ctx->authsize, is_rfc3686, nonce,
ctx1_iv_off, true); ctx1_iv_off, true, ctrlpriv->era);
skip_givenc: skip_givenc:
return 0; return 0;
...@@ -197,6 +200,7 @@ static int aead_setkey(struct crypto_aead *aead, const u8 *key, ...@@ -197,6 +200,7 @@ static int aead_setkey(struct crypto_aead *aead, const u8 *key,
{ {
struct caam_ctx *ctx = crypto_aead_ctx(aead); struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev; struct device *jrdev = ctx->jrdev;
struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
struct crypto_authenc_keys keys; struct crypto_authenc_keys keys;
int ret = 0; int ret = 0;
...@@ -211,6 +215,27 @@ static int aead_setkey(struct crypto_aead *aead, const u8 *key, ...@@ -211,6 +215,27 @@ static int aead_setkey(struct crypto_aead *aead, const u8 *key,
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif #endif
/*
* If DKP is supported, use it in the shared descriptor to generate
* the split key.
*/
if (ctrlpriv->era >= 6) {
ctx->adata.keylen = keys.authkeylen;
ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
OP_ALG_ALGSEL_MASK);
if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
goto badkey;
memcpy(ctx->key, keys.authkey, keys.authkeylen);
memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey,
keys.enckeylen);
dma_sync_single_for_device(jrdev, ctx->key_dma,
ctx->adata.keylen_pad +
keys.enckeylen, ctx->dir);
goto skip_split_key;
}
ret = gen_split_key(jrdev, ctx->key, &ctx->adata, keys.authkey, ret = gen_split_key(jrdev, ctx->key, &ctx->adata, keys.authkey,
keys.authkeylen, CAAM_MAX_KEY_SIZE - keys.authkeylen, CAAM_MAX_KEY_SIZE -
keys.enckeylen); keys.enckeylen);
...@@ -220,13 +245,14 @@ static int aead_setkey(struct crypto_aead *aead, const u8 *key, ...@@ -220,13 +245,14 @@ static int aead_setkey(struct crypto_aead *aead, const u8 *key,
/* postpend encryption key to auth split key */ /* postpend encryption key to auth split key */
memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen); memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad + dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
keys.enckeylen, DMA_TO_DEVICE); keys.enckeylen, ctx->dir);
#ifdef DEBUG #ifdef DEBUG
print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ", print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
ctx->adata.keylen_pad + keys.enckeylen, 1); ctx->adata.keylen_pad + keys.enckeylen, 1);
#endif #endif
skip_split_key:
ctx->cdata.keylen = keys.enckeylen; ctx->cdata.keylen = keys.enckeylen;
ret = aead_set_sh_desc(aead); ret = aead_set_sh_desc(aead);
...@@ -2119,7 +2145,8 @@ struct caam_crypto_alg { ...@@ -2119,7 +2145,8 @@ struct caam_crypto_alg {
struct caam_alg_entry caam; struct caam_alg_entry caam;
}; };
static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam) static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
bool uses_dkp)
{ {
struct caam_drv_private *priv; struct caam_drv_private *priv;
...@@ -2133,8 +2160,14 @@ static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam) ...@@ -2133,8 +2160,14 @@ static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam)
return PTR_ERR(ctx->jrdev); return PTR_ERR(ctx->jrdev);
} }
priv = dev_get_drvdata(ctx->jrdev->parent);
if (priv->era >= 6 && uses_dkp)
ctx->dir = DMA_BIDIRECTIONAL;
else
ctx->dir = DMA_TO_DEVICE;
ctx->key_dma = dma_map_single(ctx->jrdev, ctx->key, sizeof(ctx->key), ctx->key_dma = dma_map_single(ctx->jrdev, ctx->key, sizeof(ctx->key),
DMA_TO_DEVICE); ctx->dir);
if (dma_mapping_error(ctx->jrdev, ctx->key_dma)) { if (dma_mapping_error(ctx->jrdev, ctx->key_dma)) {
dev_err(ctx->jrdev, "unable to map key\n"); dev_err(ctx->jrdev, "unable to map key\n");
caam_jr_free(ctx->jrdev); caam_jr_free(ctx->jrdev);
...@@ -2145,7 +2178,6 @@ static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam) ...@@ -2145,7 +2178,6 @@ static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam)
ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type; ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type; ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
priv = dev_get_drvdata(ctx->jrdev->parent);
ctx->qidev = priv->qidev; ctx->qidev = priv->qidev;
spin_lock_init(&ctx->lock); spin_lock_init(&ctx->lock);
...@@ -2163,7 +2195,7 @@ static int caam_cra_init(struct crypto_tfm *tfm) ...@@ -2163,7 +2195,7 @@ static int caam_cra_init(struct crypto_tfm *tfm)
crypto_alg); crypto_alg);
struct caam_ctx *ctx = crypto_tfm_ctx(tfm); struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
return caam_init_common(ctx, &caam_alg->caam); return caam_init_common(ctx, &caam_alg->caam, false);
} }
static int caam_aead_init(struct crypto_aead *tfm) static int caam_aead_init(struct crypto_aead *tfm)
...@@ -2173,7 +2205,8 @@ static int caam_aead_init(struct crypto_aead *tfm) ...@@ -2173,7 +2205,8 @@ static int caam_aead_init(struct crypto_aead *tfm)
aead); aead);
struct caam_ctx *ctx = crypto_aead_ctx(tfm); struct caam_ctx *ctx = crypto_aead_ctx(tfm);
return caam_init_common(ctx, &caam_alg->caam); return caam_init_common(ctx, &caam_alg->caam,
alg->setkey == aead_setkey);
} }
static void caam_exit_common(struct caam_ctx *ctx) static void caam_exit_common(struct caam_ctx *ctx)
...@@ -2182,8 +2215,7 @@ static void caam_exit_common(struct caam_ctx *ctx) ...@@ -2182,8 +2215,7 @@ static void caam_exit_common(struct caam_ctx *ctx)
caam_drv_ctx_rel(ctx->drv_ctx[DECRYPT]); caam_drv_ctx_rel(ctx->drv_ctx[DECRYPT]);
caam_drv_ctx_rel(ctx->drv_ctx[GIVENCRYPT]); caam_drv_ctx_rel(ctx->drv_ctx[GIVENCRYPT]);
dma_unmap_single(ctx->jrdev, ctx->key_dma, sizeof(ctx->key), dma_unmap_single(ctx->jrdev, ctx->key_dma, sizeof(ctx->key), ctx->dir);
DMA_TO_DEVICE);
caam_jr_free(ctx->jrdev); caam_jr_free(ctx->jrdev);
} }
......
...@@ -107,6 +107,7 @@ struct caam_hash_ctx { ...@@ -107,6 +107,7 @@ struct caam_hash_ctx {
dma_addr_t sh_desc_update_first_dma; dma_addr_t sh_desc_update_first_dma;
dma_addr_t sh_desc_fin_dma; dma_addr_t sh_desc_fin_dma;
dma_addr_t sh_desc_digest_dma; dma_addr_t sh_desc_digest_dma;
enum dma_data_direction dir;
struct device *jrdev; struct device *jrdev;
u8 key[CAAM_MAX_HASH_KEY_SIZE]; u8 key[CAAM_MAX_HASH_KEY_SIZE];
int ctx_len; int ctx_len;
...@@ -241,7 +242,8 @@ static inline int ctx_map_to_sec4_sg(struct device *jrdev, ...@@ -241,7 +242,8 @@ static inline int ctx_map_to_sec4_sg(struct device *jrdev,
* read and write to seqout * read and write to seqout
*/ */
static inline void ahash_gen_sh_desc(u32 *desc, u32 state, int digestsize, static inline void ahash_gen_sh_desc(u32 *desc, u32 state, int digestsize,
struct caam_hash_ctx *ctx, bool import_ctx) struct caam_hash_ctx *ctx, bool import_ctx,
int era)
{ {
u32 op = ctx->adata.algtype; u32 op = ctx->adata.algtype;
u32 *skip_key_load; u32 *skip_key_load;
...@@ -254,9 +256,12 @@ static inline void ahash_gen_sh_desc(u32 *desc, u32 state, int digestsize, ...@@ -254,9 +256,12 @@ static inline void ahash_gen_sh_desc(u32 *desc, u32 state, int digestsize,
skip_key_load = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | skip_key_load = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
JUMP_COND_SHRD); JUMP_COND_SHRD);
if (era < 6)
append_key_as_imm(desc, ctx->key, ctx->adata.keylen_pad, append_key_as_imm(desc, ctx->key, ctx->adata.keylen_pad,
ctx->adata.keylen, CLASS_2 | ctx->adata.keylen, CLASS_2 |
KEY_DEST_MDHA_SPLIT | KEY_ENC); KEY_DEST_MDHA_SPLIT | KEY_ENC);
else
append_proto_dkp(desc, &ctx->adata);
set_jump_tgt_here(desc, skip_key_load); set_jump_tgt_here(desc, skip_key_load);
...@@ -289,13 +294,17 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash) ...@@ -289,13 +294,17 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
int digestsize = crypto_ahash_digestsize(ahash); int digestsize = crypto_ahash_digestsize(ahash);
struct device *jrdev = ctx->jrdev; struct device *jrdev = ctx->jrdev;
struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
u32 *desc; u32 *desc;
ctx->adata.key_virt = ctx->key;
/* ahash_update shared descriptor */ /* ahash_update shared descriptor */
desc = ctx->sh_desc_update; desc = ctx->sh_desc_update;
ahash_gen_sh_desc(desc, OP_ALG_AS_UPDATE, ctx->ctx_len, ctx, true); ahash_gen_sh_desc(desc, OP_ALG_AS_UPDATE, ctx->ctx_len, ctx, true,
ctrlpriv->era);
dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma, dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
desc_bytes(desc), DMA_TO_DEVICE); desc_bytes(desc), ctx->dir);
#ifdef DEBUG #ifdef DEBUG
print_hex_dump(KERN_ERR, print_hex_dump(KERN_ERR,
"ahash update shdesc@"__stringify(__LINE__)": ", "ahash update shdesc@"__stringify(__LINE__)": ",
...@@ -304,9 +313,10 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash) ...@@ -304,9 +313,10 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
/* ahash_update_first shared descriptor */ /* ahash_update_first shared descriptor */
desc = ctx->sh_desc_update_first; desc = ctx->sh_desc_update_first;
ahash_gen_sh_desc(desc, OP_ALG_AS_INIT, ctx->ctx_len, ctx, false); ahash_gen_sh_desc(desc, OP_ALG_AS_INIT, ctx->ctx_len, ctx, false,
ctrlpriv->era);
dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma, dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
desc_bytes(desc), DMA_TO_DEVICE); desc_bytes(desc), ctx->dir);
#ifdef DEBUG #ifdef DEBUG
print_hex_dump(KERN_ERR, print_hex_dump(KERN_ERR,
"ahash update first shdesc@"__stringify(__LINE__)": ", "ahash update first shdesc@"__stringify(__LINE__)": ",
...@@ -315,9 +325,10 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash) ...@@ -315,9 +325,10 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
/* ahash_final shared descriptor */ /* ahash_final shared descriptor */
desc = ctx->sh_desc_fin; desc = ctx->sh_desc_fin;
ahash_gen_sh_desc(desc, OP_ALG_AS_FINALIZE, digestsize, ctx, true); ahash_gen_sh_desc(desc, OP_ALG_AS_FINALIZE, digestsize, ctx, true,
ctrlpriv->era);
dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma, dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
desc_bytes(desc), DMA_TO_DEVICE); desc_bytes(desc), ctx->dir);
#ifdef DEBUG #ifdef DEBUG
print_hex_dump(KERN_ERR, "ahash final shdesc@"__stringify(__LINE__)": ", print_hex_dump(KERN_ERR, "ahash final shdesc@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, desc, DUMP_PREFIX_ADDRESS, 16, 4, desc,
...@@ -326,9 +337,10 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash) ...@@ -326,9 +337,10 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
/* ahash_digest shared descriptor */ /* ahash_digest shared descriptor */
desc = ctx->sh_desc_digest; desc = ctx->sh_desc_digest;
ahash_gen_sh_desc(desc, OP_ALG_AS_INITFINAL, digestsize, ctx, false); ahash_gen_sh_desc(desc, OP_ALG_AS_INITFINAL, digestsize, ctx, false,
ctrlpriv->era);
dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma, dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
desc_bytes(desc), DMA_TO_DEVICE); desc_bytes(desc), ctx->dir);
#ifdef DEBUG #ifdef DEBUG
print_hex_dump(KERN_ERR, print_hex_dump(KERN_ERR,
"ahash digest shdesc@"__stringify(__LINE__)": ", "ahash digest shdesc@"__stringify(__LINE__)": ",
...@@ -421,6 +433,7 @@ static int ahash_setkey(struct crypto_ahash *ahash, ...@@ -421,6 +433,7 @@ static int ahash_setkey(struct crypto_ahash *ahash,
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
int blocksize = crypto_tfm_alg_blocksize(&ahash->base); int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
int digestsize = crypto_ahash_digestsize(ahash); int digestsize = crypto_ahash_digestsize(ahash);
struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
int ret; int ret;
u8 *hashed_key = NULL; u8 *hashed_key = NULL;
...@@ -441,16 +454,26 @@ static int ahash_setkey(struct crypto_ahash *ahash, ...@@ -441,16 +454,26 @@ static int ahash_setkey(struct crypto_ahash *ahash,
key = hashed_key; key = hashed_key;
} }
ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key, keylen, /*
CAAM_MAX_HASH_KEY_SIZE); * If DKP is supported, use it in the shared descriptor to generate
if (ret) * the split key.
*/
if (ctrlpriv->era >= 6) {
ctx->adata.key_inline = true;
ctx->adata.keylen = keylen;
ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
OP_ALG_ALGSEL_MASK);
if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE)
goto bad_free_key; goto bad_free_key;
#ifdef DEBUG memcpy(ctx->key, key, keylen);
print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ", } else {
DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key,
ctx->adata.keylen_pad, 1); keylen, CAAM_MAX_HASH_KEY_SIZE);
#endif if (ret)
goto bad_free_key;
}
kfree(hashed_key); kfree(hashed_key);
return ahash_set_sh_desc(ahash); return ahash_set_sh_desc(ahash);
...@@ -1715,6 +1738,7 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm) ...@@ -1715,6 +1738,7 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm)
HASH_MSG_LEN + 64, HASH_MSG_LEN + 64,
HASH_MSG_LEN + SHA512_DIGEST_SIZE }; HASH_MSG_LEN + SHA512_DIGEST_SIZE };
dma_addr_t dma_addr; dma_addr_t dma_addr;
struct caam_drv_private *priv;
/* /*
* Get a Job ring from Job Ring driver to ensure in-order * Get a Job ring from Job Ring driver to ensure in-order
...@@ -1726,10 +1750,13 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm) ...@@ -1726,10 +1750,13 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm)
return PTR_ERR(ctx->jrdev); return PTR_ERR(ctx->jrdev);
} }
priv = dev_get_drvdata(ctx->jrdev->parent);
ctx->dir = priv->era >= 6 ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_update, dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_update,
offsetof(struct caam_hash_ctx, offsetof(struct caam_hash_ctx,
sh_desc_update_dma), sh_desc_update_dma),
DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC); ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
if (dma_mapping_error(ctx->jrdev, dma_addr)) { if (dma_mapping_error(ctx->jrdev, dma_addr)) {
dev_err(ctx->jrdev, "unable to map shared descriptors\n"); dev_err(ctx->jrdev, "unable to map shared descriptors\n");
caam_jr_free(ctx->jrdev); caam_jr_free(ctx->jrdev);
...@@ -1764,7 +1791,7 @@ static void caam_hash_cra_exit(struct crypto_tfm *tfm) ...@@ -1764,7 +1791,7 @@ static void caam_hash_cra_exit(struct crypto_tfm *tfm)
dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_update_dma, dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_update_dma,
offsetof(struct caam_hash_ctx, offsetof(struct caam_hash_ctx,
sh_desc_update_dma), sh_desc_update_dma),
DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC); ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
caam_jr_free(ctx->jrdev); caam_jr_free(ctx->jrdev);
} }
......
...@@ -444,6 +444,18 @@ ...@@ -444,6 +444,18 @@
#define OP_PCLID_DSAVERIFY (0x16 << OP_PCLID_SHIFT) #define OP_PCLID_DSAVERIFY (0x16 << OP_PCLID_SHIFT)
#define OP_PCLID_RSAENC_PUBKEY (0x18 << OP_PCLID_SHIFT) #define OP_PCLID_RSAENC_PUBKEY (0x18 << OP_PCLID_SHIFT)
#define OP_PCLID_RSADEC_PRVKEY (0x19 << OP_PCLID_SHIFT) #define OP_PCLID_RSADEC_PRVKEY (0x19 << OP_PCLID_SHIFT)
#define OP_PCLID_DKP_MD5 (0x20 << OP_PCLID_SHIFT)
#define OP_PCLID_DKP_SHA1 (0x21 << OP_PCLID_SHIFT)
#define OP_PCLID_DKP_SHA224 (0x22 << OP_PCLID_SHIFT)
#define OP_PCLID_DKP_SHA256 (0x23 << OP_PCLID_SHIFT)
#define OP_PCLID_DKP_SHA384 (0x24 << OP_PCLID_SHIFT)
#define OP_PCLID_DKP_SHA512 (0x25 << OP_PCLID_SHIFT)
#define OP_PCLID_DKP_RIF_MD5 (0x60 << OP_PCLID_SHIFT)
#define OP_PCLID_DKP_RIF_SHA1 (0x61 << OP_PCLID_SHIFT)
#define OP_PCLID_DKP_RIF_SHA224 (0x62 << OP_PCLID_SHIFT)
#define OP_PCLID_DKP_RIF_SHA256 (0x63 << OP_PCLID_SHIFT)
#define OP_PCLID_DKP_RIF_SHA384 (0x64 << OP_PCLID_SHIFT)
#define OP_PCLID_DKP_RIF_SHA512 (0x65 << OP_PCLID_SHIFT)
/* Assuming OP_TYPE = OP_TYPE_DECAP_PROTOCOL/ENCAP_PROTOCOL */ /* Assuming OP_TYPE = OP_TYPE_DECAP_PROTOCOL/ENCAP_PROTOCOL */
#define OP_PCLID_IPSEC (0x01 << OP_PCLID_SHIFT) #define OP_PCLID_IPSEC (0x01 << OP_PCLID_SHIFT)
...@@ -1093,6 +1105,22 @@ ...@@ -1093,6 +1105,22 @@
/* MacSec protinfos */ /* MacSec protinfos */
#define OP_PCL_MACSEC 0x0001 #define OP_PCL_MACSEC 0x0001
/* Derived Key Protocol (DKP) Protinfo */
#define OP_PCL_DKP_SRC_SHIFT 14
#define OP_PCL_DKP_SRC_MASK (3 << OP_PCL_DKP_SRC_SHIFT)
#define OP_PCL_DKP_SRC_IMM (0 << OP_PCL_DKP_SRC_SHIFT)
#define OP_PCL_DKP_SRC_SEQ (1 << OP_PCL_DKP_SRC_SHIFT)
#define OP_PCL_DKP_SRC_PTR (2 << OP_PCL_DKP_SRC_SHIFT)
#define OP_PCL_DKP_SRC_SGF (3 << OP_PCL_DKP_SRC_SHIFT)
#define OP_PCL_DKP_DST_SHIFT 12
#define OP_PCL_DKP_DST_MASK (3 << OP_PCL_DKP_DST_SHIFT)
#define OP_PCL_DKP_DST_IMM (0 << OP_PCL_DKP_DST_SHIFT)
#define OP_PCL_DKP_DST_SEQ (1 << OP_PCL_DKP_DST_SHIFT)
#define OP_PCL_DKP_DST_PTR (2 << OP_PCL_DKP_DST_SHIFT)
#define OP_PCL_DKP_DST_SGF (3 << OP_PCL_DKP_DST_SHIFT)
#define OP_PCL_DKP_KEY_SHIFT 0
#define OP_PCL_DKP_KEY_MASK (0xfff << OP_PCL_DKP_KEY_SHIFT)
/* PKI unidirectional protocol protinfo bits */ /* PKI unidirectional protocol protinfo bits */
#define OP_PCL_PKPROT_TEST 0x0008 #define OP_PCL_PKPROT_TEST 0x0008
#define OP_PCL_PKPROT_DECRYPT 0x0004 #define OP_PCL_PKPROT_DECRYPT 0x0004
...@@ -1452,6 +1480,7 @@ ...@@ -1452,6 +1480,7 @@
#define MATH_DEST_REG1 (0x01 << MATH_DEST_SHIFT) #define MATH_DEST_REG1 (0x01 << MATH_DEST_SHIFT)
#define MATH_DEST_REG2 (0x02 << MATH_DEST_SHIFT) #define MATH_DEST_REG2 (0x02 << MATH_DEST_SHIFT)
#define MATH_DEST_REG3 (0x03 << MATH_DEST_SHIFT) #define MATH_DEST_REG3 (0x03 << MATH_DEST_SHIFT)
#define MATH_DEST_DPOVRD (0x07 << MATH_DEST_SHIFT)
#define MATH_DEST_SEQINLEN (0x08 << MATH_DEST_SHIFT) #define MATH_DEST_SEQINLEN (0x08 << MATH_DEST_SHIFT)
#define MATH_DEST_SEQOUTLEN (0x09 << MATH_DEST_SHIFT) #define MATH_DEST_SEQOUTLEN (0x09 << MATH_DEST_SHIFT)
#define MATH_DEST_VARSEQINLEN (0x0a << MATH_DEST_SHIFT) #define MATH_DEST_VARSEQINLEN (0x0a << MATH_DEST_SHIFT)
......
...@@ -496,4 +496,45 @@ static inline int desc_inline_query(unsigned int sd_base_len, ...@@ -496,4 +496,45 @@ static inline int desc_inline_query(unsigned int sd_base_len,
return (rem_bytes >= 0) ? 0 : -1; return (rem_bytes >= 0) ? 0 : -1;
} }
/**
* append_proto_dkp - Derived Key Protocol (DKP): key -> split key
* @desc: pointer to buffer used for descriptor construction
* @adata: pointer to authentication transform definitions.
* keylen should be the length of initial key, while keylen_pad
* the length of the derived (split) key.
* Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1, SHA224,
* SHA256, SHA384, SHA512}.
*/
static inline void append_proto_dkp(u32 * const desc, struct alginfo *adata)
{
u32 protid;
/*
* Quick & dirty translation from OP_ALG_ALGSEL_{MD5, SHA*}
* to OP_PCLID_DKP_{MD5, SHA*}
*/
protid = (adata->algtype & OP_ALG_ALGSEL_SUBMASK) |
(0x20 << OP_ALG_ALGSEL_SHIFT);
if (adata->key_inline) {
int words;
append_operation(desc, OP_TYPE_UNI_PROTOCOL | protid |
OP_PCL_DKP_SRC_IMM | OP_PCL_DKP_DST_IMM |
adata->keylen);
append_data(desc, adata->key_virt, adata->keylen);
/* Reserve space in descriptor buffer for the derived key */
words = (ALIGN(adata->keylen_pad, CAAM_CMD_SZ) -
ALIGN(adata->keylen, CAAM_CMD_SZ)) / CAAM_CMD_SZ;
if (words)
(*desc) = cpu_to_caam32(caam32_to_cpu(*desc) + words);
} else {
append_operation(desc, OP_TYPE_UNI_PROTOCOL | protid |
OP_PCL_DKP_SRC_PTR | OP_PCL_DKP_DST_PTR |
adata->keylen);
append_ptr(desc, adata->key_dma);
}
}
#endif /* DESC_CONSTR_H */ #endif /* DESC_CONSTR_H */
...@@ -11,36 +11,6 @@ ...@@ -11,36 +11,6 @@
#include "desc_constr.h" #include "desc_constr.h"
#include "key_gen.h" #include "key_gen.h"
/**
* split_key_len - Compute MDHA split key length for a given algorithm
* @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1,
* SHA224, SHA384, SHA512.
*
* Return: MDHA split key length
*/
static inline u32 split_key_len(u32 hash)
{
/* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
u32 idx;
idx = (hash & OP_ALG_ALGSEL_SUBMASK) >> OP_ALG_ALGSEL_SHIFT;
return (u32)(mdpadlen[idx] * 2);
}
/**
* split_key_pad_len - Compute MDHA split key pad length for a given algorithm
* @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1,
* SHA224, SHA384, SHA512.
*
* Return: MDHA split key pad length
*/
static inline u32 split_key_pad_len(u32 hash)
{
return ALIGN(split_key_len(hash), 16);
}
void split_key_done(struct device *dev, u32 *desc, u32 err, void split_key_done(struct device *dev, u32 *desc, u32 err,
void *context) void *context)
{ {
......
...@@ -6,6 +6,36 @@ ...@@ -6,6 +6,36 @@
* *
*/ */
/**
* split_key_len - Compute MDHA split key length for a given algorithm
* @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1,
* SHA224, SHA384, SHA512.
*
* Return: MDHA split key length
*/
static inline u32 split_key_len(u32 hash)
{
/* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
u32 idx;
idx = (hash & OP_ALG_ALGSEL_SUBMASK) >> OP_ALG_ALGSEL_SHIFT;
return (u32)(mdpadlen[idx] * 2);
}
/**
* split_key_pad_len - Compute MDHA split key pad length for a given algorithm
* @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1,
* SHA224, SHA384, SHA512.
*
* Return: MDHA split key pad length
*/
static inline u32 split_key_pad_len(u32 hash)
{
return ALIGN(split_key_len(hash), 16);
}
struct split_key_result { struct split_key_result {
struct completion completion; struct completion completion;
int err; int err;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment