Commit 7e0880b9 authored by Horia Geantă, committed by Herbert Xu

crypto: caam - add Derived Key Protocol (DKP) support

Offload split key generation in CAAM engine, using DKP.
DKP is supported starting with Era 6.

Note that the way assoclen is transmitted from the job descriptor
to the shared descriptor changes - DPOVRD register is used instead
of MATH3 (where available), since DKP protocol thrashes the MATH
registers.

The replacement of MDHA split key generation with DKP has the side
effect of the crypto engine writing the authentication key, and thus
the DMA mapping direction for the buffer holding the key has to change
from DMA_TO_DEVICE to DMA_BIDIRECTIONAL.
There are two cases:
-key is inlined in descriptor - descriptor buffer mapping changes
-key is referenced - key buffer mapping changes
Signed-off-by: Horia Geantă <horia.geanta@nxp.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent 9fe712df
This diff is collapsed.
This diff is collapsed.
...@@ -43,28 +43,28 @@ ...@@ -43,28 +43,28 @@
15 * CAAM_CMD_SZ) 15 * CAAM_CMD_SZ)
void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata, void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata,
unsigned int icvsize); unsigned int icvsize, int era);
void cnstr_shdsc_aead_null_decap(u32 * const desc, struct alginfo *adata, void cnstr_shdsc_aead_null_decap(u32 * const desc, struct alginfo *adata,
unsigned int icvsize); unsigned int icvsize, int era);
void cnstr_shdsc_aead_encap(u32 * const desc, struct alginfo *cdata, void cnstr_shdsc_aead_encap(u32 * const desc, struct alginfo *cdata,
struct alginfo *adata, unsigned int ivsize, struct alginfo *adata, unsigned int ivsize,
unsigned int icvsize, const bool is_rfc3686, unsigned int icvsize, const bool is_rfc3686,
u32 *nonce, const u32 ctx1_iv_off, u32 *nonce, const u32 ctx1_iv_off,
const bool is_qi); const bool is_qi, int era);
void cnstr_shdsc_aead_decap(u32 * const desc, struct alginfo *cdata, void cnstr_shdsc_aead_decap(u32 * const desc, struct alginfo *cdata,
struct alginfo *adata, unsigned int ivsize, struct alginfo *adata, unsigned int ivsize,
unsigned int icvsize, const bool geniv, unsigned int icvsize, const bool geniv,
const bool is_rfc3686, u32 *nonce, const bool is_rfc3686, u32 *nonce,
const u32 ctx1_iv_off, const bool is_qi); const u32 ctx1_iv_off, const bool is_qi, int era);
void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata, void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata,
struct alginfo *adata, unsigned int ivsize, struct alginfo *adata, unsigned int ivsize,
unsigned int icvsize, const bool is_rfc3686, unsigned int icvsize, const bool is_rfc3686,
u32 *nonce, const u32 ctx1_iv_off, u32 *nonce, const u32 ctx1_iv_off,
const bool is_qi); const bool is_qi, int era);
void cnstr_shdsc_gcm_encap(u32 * const desc, struct alginfo *cdata, void cnstr_shdsc_gcm_encap(u32 * const desc, struct alginfo *cdata,
unsigned int icvsize); unsigned int icvsize);
......
...@@ -53,6 +53,7 @@ struct caam_ctx { ...@@ -53,6 +53,7 @@ struct caam_ctx {
u32 sh_desc_givenc[DESC_MAX_USED_LEN]; u32 sh_desc_givenc[DESC_MAX_USED_LEN];
u8 key[CAAM_MAX_KEY_SIZE]; u8 key[CAAM_MAX_KEY_SIZE];
dma_addr_t key_dma; dma_addr_t key_dma;
enum dma_data_direction dir;
struct alginfo adata; struct alginfo adata;
struct alginfo cdata; struct alginfo cdata;
unsigned int authsize; unsigned int authsize;
...@@ -74,6 +75,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead) ...@@ -74,6 +75,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) == const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
OP_ALG_AAI_CTR_MOD128); OP_ALG_AAI_CTR_MOD128);
const bool is_rfc3686 = alg->caam.rfc3686; const bool is_rfc3686 = alg->caam.rfc3686;
struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
if (!ctx->cdata.keylen || !ctx->authsize) if (!ctx->cdata.keylen || !ctx->authsize)
return 0; return 0;
...@@ -124,7 +126,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead) ...@@ -124,7 +126,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
cnstr_shdsc_aead_encap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata, cnstr_shdsc_aead_encap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
ivsize, ctx->authsize, is_rfc3686, nonce, ivsize, ctx->authsize, is_rfc3686, nonce,
ctx1_iv_off, true); ctx1_iv_off, true, ctrlpriv->era);
skip_enc: skip_enc:
/* aead_decrypt shared descriptor */ /* aead_decrypt shared descriptor */
...@@ -149,7 +151,8 @@ static int aead_set_sh_desc(struct crypto_aead *aead) ...@@ -149,7 +151,8 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
cnstr_shdsc_aead_decap(ctx->sh_desc_dec, &ctx->cdata, &ctx->adata, cnstr_shdsc_aead_decap(ctx->sh_desc_dec, &ctx->cdata, &ctx->adata,
ivsize, ctx->authsize, alg->caam.geniv, ivsize, ctx->authsize, alg->caam.geniv,
is_rfc3686, nonce, ctx1_iv_off, true); is_rfc3686, nonce, ctx1_iv_off, true,
ctrlpriv->era);
if (!alg->caam.geniv) if (!alg->caam.geniv)
goto skip_givenc; goto skip_givenc;
...@@ -176,7 +179,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead) ...@@ -176,7 +179,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
cnstr_shdsc_aead_givencap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata, cnstr_shdsc_aead_givencap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
ivsize, ctx->authsize, is_rfc3686, nonce, ivsize, ctx->authsize, is_rfc3686, nonce,
ctx1_iv_off, true); ctx1_iv_off, true, ctrlpriv->era);
skip_givenc: skip_givenc:
return 0; return 0;
...@@ -197,6 +200,7 @@ static int aead_setkey(struct crypto_aead *aead, const u8 *key, ...@@ -197,6 +200,7 @@ static int aead_setkey(struct crypto_aead *aead, const u8 *key,
{ {
struct caam_ctx *ctx = crypto_aead_ctx(aead); struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev; struct device *jrdev = ctx->jrdev;
struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
struct crypto_authenc_keys keys; struct crypto_authenc_keys keys;
int ret = 0; int ret = 0;
...@@ -211,6 +215,27 @@ static int aead_setkey(struct crypto_aead *aead, const u8 *key, ...@@ -211,6 +215,27 @@ static int aead_setkey(struct crypto_aead *aead, const u8 *key,
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif #endif
/*
* If DKP is supported, use it in the shared descriptor to generate
* the split key.
*/
if (ctrlpriv->era >= 6) {
ctx->adata.keylen = keys.authkeylen;
ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
OP_ALG_ALGSEL_MASK);
if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
goto badkey;
memcpy(ctx->key, keys.authkey, keys.authkeylen);
memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey,
keys.enckeylen);
dma_sync_single_for_device(jrdev, ctx->key_dma,
ctx->adata.keylen_pad +
keys.enckeylen, ctx->dir);
goto skip_split_key;
}
ret = gen_split_key(jrdev, ctx->key, &ctx->adata, keys.authkey, ret = gen_split_key(jrdev, ctx->key, &ctx->adata, keys.authkey,
keys.authkeylen, CAAM_MAX_KEY_SIZE - keys.authkeylen, CAAM_MAX_KEY_SIZE -
keys.enckeylen); keys.enckeylen);
...@@ -220,13 +245,14 @@ static int aead_setkey(struct crypto_aead *aead, const u8 *key, ...@@ -220,13 +245,14 @@ static int aead_setkey(struct crypto_aead *aead, const u8 *key,
/* postpend encryption key to auth split key */ /* postpend encryption key to auth split key */
memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen); memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad + dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
keys.enckeylen, DMA_TO_DEVICE); keys.enckeylen, ctx->dir);
#ifdef DEBUG #ifdef DEBUG
print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ", print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
ctx->adata.keylen_pad + keys.enckeylen, 1); ctx->adata.keylen_pad + keys.enckeylen, 1);
#endif #endif
skip_split_key:
ctx->cdata.keylen = keys.enckeylen; ctx->cdata.keylen = keys.enckeylen;
ret = aead_set_sh_desc(aead); ret = aead_set_sh_desc(aead);
...@@ -2119,7 +2145,8 @@ struct caam_crypto_alg { ...@@ -2119,7 +2145,8 @@ struct caam_crypto_alg {
struct caam_alg_entry caam; struct caam_alg_entry caam;
}; };
static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam) static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
bool uses_dkp)
{ {
struct caam_drv_private *priv; struct caam_drv_private *priv;
...@@ -2133,8 +2160,14 @@ static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam) ...@@ -2133,8 +2160,14 @@ static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam)
return PTR_ERR(ctx->jrdev); return PTR_ERR(ctx->jrdev);
} }
priv = dev_get_drvdata(ctx->jrdev->parent);
if (priv->era >= 6 && uses_dkp)
ctx->dir = DMA_BIDIRECTIONAL;
else
ctx->dir = DMA_TO_DEVICE;
ctx->key_dma = dma_map_single(ctx->jrdev, ctx->key, sizeof(ctx->key), ctx->key_dma = dma_map_single(ctx->jrdev, ctx->key, sizeof(ctx->key),
DMA_TO_DEVICE); ctx->dir);
if (dma_mapping_error(ctx->jrdev, ctx->key_dma)) { if (dma_mapping_error(ctx->jrdev, ctx->key_dma)) {
dev_err(ctx->jrdev, "unable to map key\n"); dev_err(ctx->jrdev, "unable to map key\n");
caam_jr_free(ctx->jrdev); caam_jr_free(ctx->jrdev);
...@@ -2145,7 +2178,6 @@ static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam) ...@@ -2145,7 +2178,6 @@ static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam)
ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type; ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type; ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
priv = dev_get_drvdata(ctx->jrdev->parent);
ctx->qidev = priv->qidev; ctx->qidev = priv->qidev;
spin_lock_init(&ctx->lock); spin_lock_init(&ctx->lock);
...@@ -2163,7 +2195,7 @@ static int caam_cra_init(struct crypto_tfm *tfm) ...@@ -2163,7 +2195,7 @@ static int caam_cra_init(struct crypto_tfm *tfm)
crypto_alg); crypto_alg);
struct caam_ctx *ctx = crypto_tfm_ctx(tfm); struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
return caam_init_common(ctx, &caam_alg->caam); return caam_init_common(ctx, &caam_alg->caam, false);
} }
static int caam_aead_init(struct crypto_aead *tfm) static int caam_aead_init(struct crypto_aead *tfm)
...@@ -2173,7 +2205,8 @@ static int caam_aead_init(struct crypto_aead *tfm) ...@@ -2173,7 +2205,8 @@ static int caam_aead_init(struct crypto_aead *tfm)
aead); aead);
struct caam_ctx *ctx = crypto_aead_ctx(tfm); struct caam_ctx *ctx = crypto_aead_ctx(tfm);
return caam_init_common(ctx, &caam_alg->caam); return caam_init_common(ctx, &caam_alg->caam,
alg->setkey == aead_setkey);
} }
static void caam_exit_common(struct caam_ctx *ctx) static void caam_exit_common(struct caam_ctx *ctx)
...@@ -2182,8 +2215,7 @@ static void caam_exit_common(struct caam_ctx *ctx) ...@@ -2182,8 +2215,7 @@ static void caam_exit_common(struct caam_ctx *ctx)
caam_drv_ctx_rel(ctx->drv_ctx[DECRYPT]); caam_drv_ctx_rel(ctx->drv_ctx[DECRYPT]);
caam_drv_ctx_rel(ctx->drv_ctx[GIVENCRYPT]); caam_drv_ctx_rel(ctx->drv_ctx[GIVENCRYPT]);
dma_unmap_single(ctx->jrdev, ctx->key_dma, sizeof(ctx->key), dma_unmap_single(ctx->jrdev, ctx->key_dma, sizeof(ctx->key), ctx->dir);
DMA_TO_DEVICE);
caam_jr_free(ctx->jrdev); caam_jr_free(ctx->jrdev);
} }
......
...@@ -107,6 +107,7 @@ struct caam_hash_ctx { ...@@ -107,6 +107,7 @@ struct caam_hash_ctx {
dma_addr_t sh_desc_update_first_dma; dma_addr_t sh_desc_update_first_dma;
dma_addr_t sh_desc_fin_dma; dma_addr_t sh_desc_fin_dma;
dma_addr_t sh_desc_digest_dma; dma_addr_t sh_desc_digest_dma;
enum dma_data_direction dir;
struct device *jrdev; struct device *jrdev;
u8 key[CAAM_MAX_HASH_KEY_SIZE]; u8 key[CAAM_MAX_HASH_KEY_SIZE];
int ctx_len; int ctx_len;
...@@ -241,7 +242,8 @@ static inline int ctx_map_to_sec4_sg(struct device *jrdev, ...@@ -241,7 +242,8 @@ static inline int ctx_map_to_sec4_sg(struct device *jrdev,
* read and write to seqout * read and write to seqout
*/ */
static inline void ahash_gen_sh_desc(u32 *desc, u32 state, int digestsize, static inline void ahash_gen_sh_desc(u32 *desc, u32 state, int digestsize,
struct caam_hash_ctx *ctx, bool import_ctx) struct caam_hash_ctx *ctx, bool import_ctx,
int era)
{ {
u32 op = ctx->adata.algtype; u32 op = ctx->adata.algtype;
u32 *skip_key_load; u32 *skip_key_load;
...@@ -254,9 +256,12 @@ static inline void ahash_gen_sh_desc(u32 *desc, u32 state, int digestsize, ...@@ -254,9 +256,12 @@ static inline void ahash_gen_sh_desc(u32 *desc, u32 state, int digestsize,
skip_key_load = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | skip_key_load = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
JUMP_COND_SHRD); JUMP_COND_SHRD);
append_key_as_imm(desc, ctx->key, ctx->adata.keylen_pad, if (era < 6)
ctx->adata.keylen, CLASS_2 | append_key_as_imm(desc, ctx->key, ctx->adata.keylen_pad,
KEY_DEST_MDHA_SPLIT | KEY_ENC); ctx->adata.keylen, CLASS_2 |
KEY_DEST_MDHA_SPLIT | KEY_ENC);
else
append_proto_dkp(desc, &ctx->adata);
set_jump_tgt_here(desc, skip_key_load); set_jump_tgt_here(desc, skip_key_load);
...@@ -289,13 +294,17 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash) ...@@ -289,13 +294,17 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
int digestsize = crypto_ahash_digestsize(ahash); int digestsize = crypto_ahash_digestsize(ahash);
struct device *jrdev = ctx->jrdev; struct device *jrdev = ctx->jrdev;
struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
u32 *desc; u32 *desc;
ctx->adata.key_virt = ctx->key;
/* ahash_update shared descriptor */ /* ahash_update shared descriptor */
desc = ctx->sh_desc_update; desc = ctx->sh_desc_update;
ahash_gen_sh_desc(desc, OP_ALG_AS_UPDATE, ctx->ctx_len, ctx, true); ahash_gen_sh_desc(desc, OP_ALG_AS_UPDATE, ctx->ctx_len, ctx, true,
ctrlpriv->era);
dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma, dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
desc_bytes(desc), DMA_TO_DEVICE); desc_bytes(desc), ctx->dir);
#ifdef DEBUG #ifdef DEBUG
print_hex_dump(KERN_ERR, print_hex_dump(KERN_ERR,
"ahash update shdesc@"__stringify(__LINE__)": ", "ahash update shdesc@"__stringify(__LINE__)": ",
...@@ -304,9 +313,10 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash) ...@@ -304,9 +313,10 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
/* ahash_update_first shared descriptor */ /* ahash_update_first shared descriptor */
desc = ctx->sh_desc_update_first; desc = ctx->sh_desc_update_first;
ahash_gen_sh_desc(desc, OP_ALG_AS_INIT, ctx->ctx_len, ctx, false); ahash_gen_sh_desc(desc, OP_ALG_AS_INIT, ctx->ctx_len, ctx, false,
ctrlpriv->era);
dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma, dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
desc_bytes(desc), DMA_TO_DEVICE); desc_bytes(desc), ctx->dir);
#ifdef DEBUG #ifdef DEBUG
print_hex_dump(KERN_ERR, print_hex_dump(KERN_ERR,
"ahash update first shdesc@"__stringify(__LINE__)": ", "ahash update first shdesc@"__stringify(__LINE__)": ",
...@@ -315,9 +325,10 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash) ...@@ -315,9 +325,10 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
/* ahash_final shared descriptor */ /* ahash_final shared descriptor */
desc = ctx->sh_desc_fin; desc = ctx->sh_desc_fin;
ahash_gen_sh_desc(desc, OP_ALG_AS_FINALIZE, digestsize, ctx, true); ahash_gen_sh_desc(desc, OP_ALG_AS_FINALIZE, digestsize, ctx, true,
ctrlpriv->era);
dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma, dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
desc_bytes(desc), DMA_TO_DEVICE); desc_bytes(desc), ctx->dir);
#ifdef DEBUG #ifdef DEBUG
print_hex_dump(KERN_ERR, "ahash final shdesc@"__stringify(__LINE__)": ", print_hex_dump(KERN_ERR, "ahash final shdesc@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, desc, DUMP_PREFIX_ADDRESS, 16, 4, desc,
...@@ -326,9 +337,10 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash) ...@@ -326,9 +337,10 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
/* ahash_digest shared descriptor */ /* ahash_digest shared descriptor */
desc = ctx->sh_desc_digest; desc = ctx->sh_desc_digest;
ahash_gen_sh_desc(desc, OP_ALG_AS_INITFINAL, digestsize, ctx, false); ahash_gen_sh_desc(desc, OP_ALG_AS_INITFINAL, digestsize, ctx, false,
ctrlpriv->era);
dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma, dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
desc_bytes(desc), DMA_TO_DEVICE); desc_bytes(desc), ctx->dir);
#ifdef DEBUG #ifdef DEBUG
print_hex_dump(KERN_ERR, print_hex_dump(KERN_ERR,
"ahash digest shdesc@"__stringify(__LINE__)": ", "ahash digest shdesc@"__stringify(__LINE__)": ",
...@@ -421,6 +433,7 @@ static int ahash_setkey(struct crypto_ahash *ahash, ...@@ -421,6 +433,7 @@ static int ahash_setkey(struct crypto_ahash *ahash,
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
int blocksize = crypto_tfm_alg_blocksize(&ahash->base); int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
int digestsize = crypto_ahash_digestsize(ahash); int digestsize = crypto_ahash_digestsize(ahash);
struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
int ret; int ret;
u8 *hashed_key = NULL; u8 *hashed_key = NULL;
...@@ -441,16 +454,26 @@ static int ahash_setkey(struct crypto_ahash *ahash, ...@@ -441,16 +454,26 @@ static int ahash_setkey(struct crypto_ahash *ahash,
key = hashed_key; key = hashed_key;
} }
ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key, keylen, /*
CAAM_MAX_HASH_KEY_SIZE); * If DKP is supported, use it in the shared descriptor to generate
if (ret) * the split key.
goto bad_free_key; */
if (ctrlpriv->era >= 6) {
ctx->adata.key_inline = true;
ctx->adata.keylen = keylen;
ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
OP_ALG_ALGSEL_MASK);
#ifdef DEBUG if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE)
print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ", goto bad_free_key;
DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
ctx->adata.keylen_pad, 1); memcpy(ctx->key, key, keylen);
#endif } else {
ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key,
keylen, CAAM_MAX_HASH_KEY_SIZE);
if (ret)
goto bad_free_key;
}
kfree(hashed_key); kfree(hashed_key);
return ahash_set_sh_desc(ahash); return ahash_set_sh_desc(ahash);
...@@ -1715,6 +1738,7 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm) ...@@ -1715,6 +1738,7 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm)
HASH_MSG_LEN + 64, HASH_MSG_LEN + 64,
HASH_MSG_LEN + SHA512_DIGEST_SIZE }; HASH_MSG_LEN + SHA512_DIGEST_SIZE };
dma_addr_t dma_addr; dma_addr_t dma_addr;
struct caam_drv_private *priv;
/* /*
* Get a Job ring from Job Ring driver to ensure in-order * Get a Job ring from Job Ring driver to ensure in-order
...@@ -1726,10 +1750,13 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm) ...@@ -1726,10 +1750,13 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm)
return PTR_ERR(ctx->jrdev); return PTR_ERR(ctx->jrdev);
} }
priv = dev_get_drvdata(ctx->jrdev->parent);
ctx->dir = priv->era >= 6 ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_update, dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_update,
offsetof(struct caam_hash_ctx, offsetof(struct caam_hash_ctx,
sh_desc_update_dma), sh_desc_update_dma),
DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC); ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
if (dma_mapping_error(ctx->jrdev, dma_addr)) { if (dma_mapping_error(ctx->jrdev, dma_addr)) {
dev_err(ctx->jrdev, "unable to map shared descriptors\n"); dev_err(ctx->jrdev, "unable to map shared descriptors\n");
caam_jr_free(ctx->jrdev); caam_jr_free(ctx->jrdev);
...@@ -1764,7 +1791,7 @@ static void caam_hash_cra_exit(struct crypto_tfm *tfm) ...@@ -1764,7 +1791,7 @@ static void caam_hash_cra_exit(struct crypto_tfm *tfm)
dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_update_dma, dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_update_dma,
offsetof(struct caam_hash_ctx, offsetof(struct caam_hash_ctx,
sh_desc_update_dma), sh_desc_update_dma),
DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC); ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
caam_jr_free(ctx->jrdev); caam_jr_free(ctx->jrdev);
} }
......
...@@ -444,6 +444,18 @@ ...@@ -444,6 +444,18 @@
#define OP_PCLID_DSAVERIFY (0x16 << OP_PCLID_SHIFT) #define OP_PCLID_DSAVERIFY (0x16 << OP_PCLID_SHIFT)
#define OP_PCLID_RSAENC_PUBKEY (0x18 << OP_PCLID_SHIFT) #define OP_PCLID_RSAENC_PUBKEY (0x18 << OP_PCLID_SHIFT)
#define OP_PCLID_RSADEC_PRVKEY (0x19 << OP_PCLID_SHIFT) #define OP_PCLID_RSADEC_PRVKEY (0x19 << OP_PCLID_SHIFT)
#define OP_PCLID_DKP_MD5 (0x20 << OP_PCLID_SHIFT)
#define OP_PCLID_DKP_SHA1 (0x21 << OP_PCLID_SHIFT)
#define OP_PCLID_DKP_SHA224 (0x22 << OP_PCLID_SHIFT)
#define OP_PCLID_DKP_SHA256 (0x23 << OP_PCLID_SHIFT)
#define OP_PCLID_DKP_SHA384 (0x24 << OP_PCLID_SHIFT)
#define OP_PCLID_DKP_SHA512 (0x25 << OP_PCLID_SHIFT)
#define OP_PCLID_DKP_RIF_MD5 (0x60 << OP_PCLID_SHIFT)
#define OP_PCLID_DKP_RIF_SHA1 (0x61 << OP_PCLID_SHIFT)
#define OP_PCLID_DKP_RIF_SHA224 (0x62 << OP_PCLID_SHIFT)
#define OP_PCLID_DKP_RIF_SHA256 (0x63 << OP_PCLID_SHIFT)
#define OP_PCLID_DKP_RIF_SHA384 (0x64 << OP_PCLID_SHIFT)
#define OP_PCLID_DKP_RIF_SHA512 (0x65 << OP_PCLID_SHIFT)
/* Assuming OP_TYPE = OP_TYPE_DECAP_PROTOCOL/ENCAP_PROTOCOL */ /* Assuming OP_TYPE = OP_TYPE_DECAP_PROTOCOL/ENCAP_PROTOCOL */
#define OP_PCLID_IPSEC (0x01 << OP_PCLID_SHIFT) #define OP_PCLID_IPSEC (0x01 << OP_PCLID_SHIFT)
...@@ -1093,6 +1105,22 @@ ...@@ -1093,6 +1105,22 @@
/* MacSec protinfos */ /* MacSec protinfos */
#define OP_PCL_MACSEC 0x0001 #define OP_PCL_MACSEC 0x0001
/* Derived Key Protocol (DKP) Protinfo */
#define OP_PCL_DKP_SRC_SHIFT 14
#define OP_PCL_DKP_SRC_MASK (3 << OP_PCL_DKP_SRC_SHIFT)
#define OP_PCL_DKP_SRC_IMM (0 << OP_PCL_DKP_SRC_SHIFT)
#define OP_PCL_DKP_SRC_SEQ (1 << OP_PCL_DKP_SRC_SHIFT)
#define OP_PCL_DKP_SRC_PTR (2 << OP_PCL_DKP_SRC_SHIFT)
#define OP_PCL_DKP_SRC_SGF (3 << OP_PCL_DKP_SRC_SHIFT)
#define OP_PCL_DKP_DST_SHIFT 12
#define OP_PCL_DKP_DST_MASK (3 << OP_PCL_DKP_DST_SHIFT)
#define OP_PCL_DKP_DST_IMM (0 << OP_PCL_DKP_DST_SHIFT)
#define OP_PCL_DKP_DST_SEQ (1 << OP_PCL_DKP_DST_SHIFT)
#define OP_PCL_DKP_DST_PTR (2 << OP_PCL_DKP_DST_SHIFT)
#define OP_PCL_DKP_DST_SGF (3 << OP_PCL_DKP_DST_SHIFT)
#define OP_PCL_DKP_KEY_SHIFT 0
#define OP_PCL_DKP_KEY_MASK (0xfff << OP_PCL_DKP_KEY_SHIFT)
/* PKI unidirectional protocol protinfo bits */ /* PKI unidirectional protocol protinfo bits */
#define OP_PCL_PKPROT_TEST 0x0008 #define OP_PCL_PKPROT_TEST 0x0008
#define OP_PCL_PKPROT_DECRYPT 0x0004 #define OP_PCL_PKPROT_DECRYPT 0x0004
...@@ -1452,6 +1480,7 @@ ...@@ -1452,6 +1480,7 @@
#define MATH_DEST_REG1 (0x01 << MATH_DEST_SHIFT) #define MATH_DEST_REG1 (0x01 << MATH_DEST_SHIFT)
#define MATH_DEST_REG2 (0x02 << MATH_DEST_SHIFT) #define MATH_DEST_REG2 (0x02 << MATH_DEST_SHIFT)
#define MATH_DEST_REG3 (0x03 << MATH_DEST_SHIFT) #define MATH_DEST_REG3 (0x03 << MATH_DEST_SHIFT)
#define MATH_DEST_DPOVRD (0x07 << MATH_DEST_SHIFT)
#define MATH_DEST_SEQINLEN (0x08 << MATH_DEST_SHIFT) #define MATH_DEST_SEQINLEN (0x08 << MATH_DEST_SHIFT)
#define MATH_DEST_SEQOUTLEN (0x09 << MATH_DEST_SHIFT) #define MATH_DEST_SEQOUTLEN (0x09 << MATH_DEST_SHIFT)
#define MATH_DEST_VARSEQINLEN (0x0a << MATH_DEST_SHIFT) #define MATH_DEST_VARSEQINLEN (0x0a << MATH_DEST_SHIFT)
......
...@@ -496,4 +496,45 @@ static inline int desc_inline_query(unsigned int sd_base_len, ...@@ -496,4 +496,45 @@ static inline int desc_inline_query(unsigned int sd_base_len,
return (rem_bytes >= 0) ? 0 : -1; return (rem_bytes >= 0) ? 0 : -1;
} }
/**
 * append_proto_dkp - Derived Key Protocol (DKP): key -> split key
 * @desc: pointer to buffer used for descriptor construction
 * @adata: pointer to authentication transform definitions.
 *         keylen should be the length of initial key, while keylen_pad
 *         the length of the derived (split) key.
 *         Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1, SHA224,
 *         SHA256, SHA384, SHA512}.
 */
static inline void append_proto_dkp(u32 * const desc, struct alginfo *adata)
{
	u32 pclid;

	/*
	 * Quick & dirty translation from OP_ALG_ALGSEL_{MD5, SHA*} to the
	 * matching OP_PCLID_DKP_{MD5, SHA*} protocol ID (0x20 + sub-field).
	 */
	pclid = (0x20 << OP_ALG_ALGSEL_SHIFT) |
		(adata->algtype & OP_ALG_ALGSEL_SUBMASK);

	if (!adata->key_inline) {
		/* Key is referenced: engine reads/writes it via its DMA address */
		append_operation(desc, OP_TYPE_UNI_PROTOCOL | pclid |
				 OP_PCL_DKP_SRC_PTR | OP_PCL_DKP_DST_PTR |
				 adata->keylen);
		append_ptr(desc, adata->key_dma);
	} else {
		int extra_words;

		/* Key is immediate: place it right after the OPERATION command */
		append_operation(desc, OP_TYPE_UNI_PROTOCOL | pclid |
				 OP_PCL_DKP_SRC_IMM | OP_PCL_DKP_DST_IMM |
				 adata->keylen);
		append_data(desc, adata->key_virt, adata->keylen);

		/*
		 * DKP overwrites the inlined key with the longer derived
		 * (split) key, so grow the descriptor header length to
		 * reserve room for the extra command words.
		 */
		extra_words = (ALIGN(adata->keylen_pad, CAAM_CMD_SZ) -
			       ALIGN(adata->keylen, CAAM_CMD_SZ)) /
			      CAAM_CMD_SZ;
		if (extra_words)
			(*desc) = cpu_to_caam32(caam32_to_cpu(*desc) +
						extra_words);
	}
}
#endif /* DESC_CONSTR_H */ #endif /* DESC_CONSTR_H */
...@@ -11,36 +11,6 @@ ...@@ -11,36 +11,6 @@
#include "desc_constr.h" #include "desc_constr.h"
#include "key_gen.h" #include "key_gen.h"
/**
 * split_key_len - Compute MDHA split key length for a given algorithm
 * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1,
 * SHA224, SHA256, SHA384, SHA512.
 *
 * Return: MDHA split key length (twice the MDHA pad size for @hash)
 */
static inline u32 split_key_len(u32 hash)
{
/* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
u32 idx;
/* ALGSEL sub-field enumerates the digests in the same order as mdpadlen */
idx = (hash & OP_ALG_ALGSEL_SUBMASK) >> OP_ALG_ALGSEL_SHIFT;
return (u32)(mdpadlen[idx] * 2);
}
/**
 * split_key_pad_len - Compute MDHA split key pad length for a given algorithm
 * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1,
 * SHA224, SHA256, SHA384, SHA512.
 *
 * Return: MDHA split key pad length, i.e. split_key_len() rounded up to a
 * 16-byte boundary
 */
static inline u32 split_key_pad_len(u32 hash)
{
return ALIGN(split_key_len(hash), 16);
}
void split_key_done(struct device *dev, u32 *desc, u32 err, void split_key_done(struct device *dev, u32 *desc, u32 err,
void *context) void *context)
{ {
......
...@@ -6,6 +6,36 @@ ...@@ -6,6 +6,36 @@
* *
*/ */
/**
 * split_key_len - Compute MDHA split key length for a given algorithm
 * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1,
 * SHA224, SHA256, SHA384, SHA512.
 *
 * Return: MDHA split key length (twice the MDHA pad size for @hash)
 */
static inline u32 split_key_len(u32 hash)
{
/* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
u32 idx;
/* ALGSEL sub-field enumerates the digests in the same order as mdpadlen */
idx = (hash & OP_ALG_ALGSEL_SUBMASK) >> OP_ALG_ALGSEL_SHIFT;
return (u32)(mdpadlen[idx] * 2);
}
/**
 * split_key_pad_len - Compute MDHA split key pad length for a given algorithm
 * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1,
 * SHA224, SHA256, SHA384, SHA512.
 *
 * Return: MDHA split key pad length, i.e. split_key_len() rounded up to a
 * 16-byte boundary
 */
static inline u32 split_key_pad_len(u32 hash)
{
return ALIGN(split_key_len(hash), 16);
}
struct split_key_result { struct split_key_result {
struct completion completion; struct completion completion;
int err; int err;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment