Commit 7fe948a5 authored by Ard Biesheuvel, committed by Herbert Xu

crypto: qat - switch to skcipher API

Commit 7a7ffe65 ("crypto: skcipher - Add top-level skcipher interface")
dated 20 August 2015 introduced the new skcipher API which is supposed to
replace both blkcipher and ablkcipher. While all consumers of the API have
been converted long ago, some producers of the ablkcipher remain, forcing
us to keep the ablkcipher support routines alive, along with the matching
code to expose [a]blkciphers via the skcipher API.

So switch this driver to the skcipher API, allowing us to finally drop the
ablkcipher code in the near future.
Co-developed-by: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
Signed-off-by: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent 373960d7
...@@ -48,6 +48,7 @@ ...@@ -48,6 +48,7 @@
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/crypto.h> #include <linux/crypto.h>
#include <crypto/internal/aead.h> #include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/aes.h> #include <crypto/aes.h>
#include <crypto/sha.h> #include <crypto/sha.h>
#include <crypto/hash.h> #include <crypto/hash.h>
...@@ -122,7 +123,7 @@ struct qat_alg_aead_ctx { ...@@ -122,7 +123,7 @@ struct qat_alg_aead_ctx {
char opad[SHA512_BLOCK_SIZE]; char opad[SHA512_BLOCK_SIZE];
}; };
struct qat_alg_ablkcipher_ctx { struct qat_alg_skcipher_ctx {
struct icp_qat_hw_cipher_algo_blk *enc_cd; struct icp_qat_hw_cipher_algo_blk *enc_cd;
struct icp_qat_hw_cipher_algo_blk *dec_cd; struct icp_qat_hw_cipher_algo_blk *dec_cd;
dma_addr_t enc_cd_paddr; dma_addr_t enc_cd_paddr;
...@@ -130,7 +131,7 @@ struct qat_alg_ablkcipher_ctx { ...@@ -130,7 +131,7 @@ struct qat_alg_ablkcipher_ctx {
struct icp_qat_fw_la_bulk_req enc_fw_req; struct icp_qat_fw_la_bulk_req enc_fw_req;
struct icp_qat_fw_la_bulk_req dec_fw_req; struct icp_qat_fw_la_bulk_req dec_fw_req;
struct qat_crypto_instance *inst; struct qat_crypto_instance *inst;
struct crypto_tfm *tfm; struct crypto_skcipher *tfm;
}; };
static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg) static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg)
...@@ -463,7 +464,7 @@ static int qat_alg_aead_init_dec_session(struct crypto_aead *aead_tfm, ...@@ -463,7 +464,7 @@ static int qat_alg_aead_init_dec_session(struct crypto_aead *aead_tfm,
return 0; return 0;
} }
static void qat_alg_ablkcipher_init_com(struct qat_alg_ablkcipher_ctx *ctx, static void qat_alg_skcipher_init_com(struct qat_alg_skcipher_ctx *ctx,
struct icp_qat_fw_la_bulk_req *req, struct icp_qat_fw_la_bulk_req *req,
struct icp_qat_hw_cipher_algo_blk *cd, struct icp_qat_hw_cipher_algo_blk *cd,
const uint8_t *key, unsigned int keylen) const uint8_t *key, unsigned int keylen)
...@@ -485,7 +486,7 @@ static void qat_alg_ablkcipher_init_com(struct qat_alg_ablkcipher_ctx *ctx, ...@@ -485,7 +486,7 @@ static void qat_alg_ablkcipher_init_com(struct qat_alg_ablkcipher_ctx *ctx,
ICP_QAT_FW_COMN_NEXT_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR); ICP_QAT_FW_COMN_NEXT_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
} }
static void qat_alg_ablkcipher_init_enc(struct qat_alg_ablkcipher_ctx *ctx, static void qat_alg_skcipher_init_enc(struct qat_alg_skcipher_ctx *ctx,
int alg, const uint8_t *key, int alg, const uint8_t *key,
unsigned int keylen, int mode) unsigned int keylen, int mode)
{ {
...@@ -493,12 +494,12 @@ static void qat_alg_ablkcipher_init_enc(struct qat_alg_ablkcipher_ctx *ctx, ...@@ -493,12 +494,12 @@ static void qat_alg_ablkcipher_init_enc(struct qat_alg_ablkcipher_ctx *ctx,
struct icp_qat_fw_la_bulk_req *req = &ctx->enc_fw_req; struct icp_qat_fw_la_bulk_req *req = &ctx->enc_fw_req;
struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars; struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
qat_alg_ablkcipher_init_com(ctx, req, enc_cd, key, keylen); qat_alg_skcipher_init_com(ctx, req, enc_cd, key, keylen);
cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr; cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
enc_cd->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg, mode); enc_cd->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg, mode);
} }
static void qat_alg_ablkcipher_init_dec(struct qat_alg_ablkcipher_ctx *ctx, static void qat_alg_skcipher_init_dec(struct qat_alg_skcipher_ctx *ctx,
int alg, const uint8_t *key, int alg, const uint8_t *key,
unsigned int keylen, int mode) unsigned int keylen, int mode)
{ {
...@@ -506,7 +507,7 @@ static void qat_alg_ablkcipher_init_dec(struct qat_alg_ablkcipher_ctx *ctx, ...@@ -506,7 +507,7 @@ static void qat_alg_ablkcipher_init_dec(struct qat_alg_ablkcipher_ctx *ctx,
struct icp_qat_fw_la_bulk_req *req = &ctx->dec_fw_req; struct icp_qat_fw_la_bulk_req *req = &ctx->dec_fw_req;
struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars; struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
qat_alg_ablkcipher_init_com(ctx, req, dec_cd, key, keylen); qat_alg_skcipher_init_com(ctx, req, dec_cd, key, keylen);
cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr; cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;
if (mode != ICP_QAT_HW_CIPHER_CTR_MODE) if (mode != ICP_QAT_HW_CIPHER_CTR_MODE)
...@@ -577,7 +578,7 @@ static int qat_alg_aead_init_sessions(struct crypto_aead *tfm, const u8 *key, ...@@ -577,7 +578,7 @@ static int qat_alg_aead_init_sessions(struct crypto_aead *tfm, const u8 *key,
return -EFAULT; return -EFAULT;
} }
static int qat_alg_ablkcipher_init_sessions(struct qat_alg_ablkcipher_ctx *ctx, static int qat_alg_skcipher_init_sessions(struct qat_alg_skcipher_ctx *ctx,
const uint8_t *key, const uint8_t *key,
unsigned int keylen, unsigned int keylen,
int mode) int mode)
...@@ -587,11 +588,11 @@ static int qat_alg_ablkcipher_init_sessions(struct qat_alg_ablkcipher_ctx *ctx, ...@@ -587,11 +588,11 @@ static int qat_alg_ablkcipher_init_sessions(struct qat_alg_ablkcipher_ctx *ctx,
if (qat_alg_validate_key(keylen, &alg, mode)) if (qat_alg_validate_key(keylen, &alg, mode))
goto bad_key; goto bad_key;
qat_alg_ablkcipher_init_enc(ctx, alg, key, keylen, mode); qat_alg_skcipher_init_enc(ctx, alg, key, keylen, mode);
qat_alg_ablkcipher_init_dec(ctx, alg, key, keylen, mode); qat_alg_skcipher_init_dec(ctx, alg, key, keylen, mode);
return 0; return 0;
bad_key: bad_key:
crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); crypto_skcipher_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL; return -EINVAL;
} }
...@@ -832,12 +833,12 @@ static void qat_aead_alg_callback(struct icp_qat_fw_la_resp *qat_resp, ...@@ -832,12 +833,12 @@ static void qat_aead_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
areq->base.complete(&areq->base, res); areq->base.complete(&areq->base, res);
} }
static void qat_ablkcipher_alg_callback(struct icp_qat_fw_la_resp *qat_resp, static void qat_skcipher_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
struct qat_crypto_request *qat_req) struct qat_crypto_request *qat_req)
{ {
struct qat_alg_ablkcipher_ctx *ctx = qat_req->ablkcipher_ctx; struct qat_alg_skcipher_ctx *ctx = qat_req->skcipher_ctx;
struct qat_crypto_instance *inst = ctx->inst; struct qat_crypto_instance *inst = ctx->inst;
struct ablkcipher_request *areq = qat_req->ablkcipher_req; struct skcipher_request *sreq = qat_req->skcipher_req;
uint8_t stat_filed = qat_resp->comn_resp.comn_status; uint8_t stat_filed = qat_resp->comn_resp.comn_status;
struct device *dev = &GET_DEV(ctx->inst->accel_dev); struct device *dev = &GET_DEV(ctx->inst->accel_dev);
int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_filed); int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_filed);
...@@ -846,11 +847,11 @@ static void qat_ablkcipher_alg_callback(struct icp_qat_fw_la_resp *qat_resp, ...@@ -846,11 +847,11 @@ static void qat_ablkcipher_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK)) if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
res = -EINVAL; res = -EINVAL;
memcpy(areq->info, qat_req->iv, AES_BLOCK_SIZE); memcpy(sreq->iv, qat_req->iv, AES_BLOCK_SIZE);
dma_free_coherent(dev, AES_BLOCK_SIZE, qat_req->iv, dma_free_coherent(dev, AES_BLOCK_SIZE, qat_req->iv,
qat_req->iv_paddr); qat_req->iv_paddr);
areq->base.complete(&areq->base, res); sreq->base.complete(&sreq->base, res);
} }
void qat_alg_callback(void *resp) void qat_alg_callback(void *resp)
...@@ -949,7 +950,7 @@ static int qat_alg_aead_enc(struct aead_request *areq) ...@@ -949,7 +950,7 @@ static int qat_alg_aead_enc(struct aead_request *areq)
return -EINPROGRESS; return -EINPROGRESS;
} }
static int qat_alg_ablkcipher_rekey(struct qat_alg_ablkcipher_ctx *ctx, static int qat_alg_skcipher_rekey(struct qat_alg_skcipher_ctx *ctx,
const u8 *key, unsigned int keylen, const u8 *key, unsigned int keylen,
int mode) int mode)
{ {
...@@ -958,10 +959,10 @@ static int qat_alg_ablkcipher_rekey(struct qat_alg_ablkcipher_ctx *ctx, ...@@ -958,10 +959,10 @@ static int qat_alg_ablkcipher_rekey(struct qat_alg_ablkcipher_ctx *ctx,
memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req)); memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req)); memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));
return qat_alg_ablkcipher_init_sessions(ctx, key, keylen, mode); return qat_alg_skcipher_init_sessions(ctx, key, keylen, mode);
} }
static int qat_alg_ablkcipher_newkey(struct qat_alg_ablkcipher_ctx *ctx, static int qat_alg_skcipher_newkey(struct qat_alg_skcipher_ctx *ctx,
const u8 *key, unsigned int keylen, const u8 *key, unsigned int keylen,
int mode) int mode)
{ {
...@@ -990,7 +991,7 @@ static int qat_alg_ablkcipher_newkey(struct qat_alg_ablkcipher_ctx *ctx, ...@@ -990,7 +991,7 @@ static int qat_alg_ablkcipher_newkey(struct qat_alg_ablkcipher_ctx *ctx,
goto out_free_enc; goto out_free_enc;
} }
ret = qat_alg_ablkcipher_init_sessions(ctx, key, keylen, mode); ret = qat_alg_skcipher_init_sessions(ctx, key, keylen, mode);
if (ret) if (ret)
goto out_free_all; goto out_free_all;
...@@ -1012,51 +1013,51 @@ static int qat_alg_ablkcipher_newkey(struct qat_alg_ablkcipher_ctx *ctx, ...@@ -1012,51 +1013,51 @@ static int qat_alg_ablkcipher_newkey(struct qat_alg_ablkcipher_ctx *ctx,
return ret; return ret;
} }
static int qat_alg_ablkcipher_setkey(struct crypto_ablkcipher *tfm, static int qat_alg_skcipher_setkey(struct crypto_skcipher *tfm,
const u8 *key, unsigned int keylen, const u8 *key, unsigned int keylen,
int mode) int mode)
{ {
struct qat_alg_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(tfm); struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
if (ctx->enc_cd) if (ctx->enc_cd)
return qat_alg_ablkcipher_rekey(ctx, key, keylen, mode); return qat_alg_skcipher_rekey(ctx, key, keylen, mode);
else else
return qat_alg_ablkcipher_newkey(ctx, key, keylen, mode); return qat_alg_skcipher_newkey(ctx, key, keylen, mode);
} }
static int qat_alg_ablkcipher_cbc_setkey(struct crypto_ablkcipher *tfm, static int qat_alg_skcipher_cbc_setkey(struct crypto_skcipher *tfm,
const u8 *key, unsigned int keylen) const u8 *key, unsigned int keylen)
{ {
return qat_alg_ablkcipher_setkey(tfm, key, keylen, return qat_alg_skcipher_setkey(tfm, key, keylen,
ICP_QAT_HW_CIPHER_CBC_MODE); ICP_QAT_HW_CIPHER_CBC_MODE);
} }
static int qat_alg_ablkcipher_ctr_setkey(struct crypto_ablkcipher *tfm, static int qat_alg_skcipher_ctr_setkey(struct crypto_skcipher *tfm,
const u8 *key, unsigned int keylen) const u8 *key, unsigned int keylen)
{ {
return qat_alg_ablkcipher_setkey(tfm, key, keylen, return qat_alg_skcipher_setkey(tfm, key, keylen,
ICP_QAT_HW_CIPHER_CTR_MODE); ICP_QAT_HW_CIPHER_CTR_MODE);
} }
static int qat_alg_ablkcipher_xts_setkey(struct crypto_ablkcipher *tfm, static int qat_alg_skcipher_xts_setkey(struct crypto_skcipher *tfm,
const u8 *key, unsigned int keylen) const u8 *key, unsigned int keylen)
{ {
return qat_alg_ablkcipher_setkey(tfm, key, keylen, return qat_alg_skcipher_setkey(tfm, key, keylen,
ICP_QAT_HW_CIPHER_XTS_MODE); ICP_QAT_HW_CIPHER_XTS_MODE);
} }
static int qat_alg_ablkcipher_encrypt(struct ablkcipher_request *req) static int qat_alg_skcipher_encrypt(struct skcipher_request *req)
{ {
struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req); struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
struct crypto_tfm *tfm = crypto_ablkcipher_tfm(atfm); struct crypto_tfm *tfm = crypto_skcipher_tfm(stfm);
struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm); struct qat_alg_skcipher_ctx *ctx = crypto_tfm_ctx(tfm);
struct qat_crypto_request *qat_req = ablkcipher_request_ctx(req); struct qat_crypto_request *qat_req = skcipher_request_ctx(req);
struct icp_qat_fw_la_cipher_req_params *cipher_param; struct icp_qat_fw_la_cipher_req_params *cipher_param;
struct icp_qat_fw_la_bulk_req *msg; struct icp_qat_fw_la_bulk_req *msg;
struct device *dev = &GET_DEV(ctx->inst->accel_dev); struct device *dev = &GET_DEV(ctx->inst->accel_dev);
int ret, ctr = 0; int ret, ctr = 0;
if (req->nbytes == 0) if (req->cryptlen == 0)
return 0; return 0;
qat_req->iv = dma_alloc_coherent(dev, AES_BLOCK_SIZE, qat_req->iv = dma_alloc_coherent(dev, AES_BLOCK_SIZE,
...@@ -1073,17 +1074,17 @@ static int qat_alg_ablkcipher_encrypt(struct ablkcipher_request *req) ...@@ -1073,17 +1074,17 @@ static int qat_alg_ablkcipher_encrypt(struct ablkcipher_request *req)
msg = &qat_req->req; msg = &qat_req->req;
*msg = ctx->enc_fw_req; *msg = ctx->enc_fw_req;
qat_req->ablkcipher_ctx = ctx; qat_req->skcipher_ctx = ctx;
qat_req->ablkcipher_req = req; qat_req->skcipher_req = req;
qat_req->cb = qat_ablkcipher_alg_callback; qat_req->cb = qat_skcipher_alg_callback;
qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req; qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp; qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp; qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
cipher_param = (void *)&qat_req->req.serv_specif_rqpars; cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
cipher_param->cipher_length = req->nbytes; cipher_param->cipher_length = req->cryptlen;
cipher_param->cipher_offset = 0; cipher_param->cipher_offset = 0;
cipher_param->u.s.cipher_IV_ptr = qat_req->iv_paddr; cipher_param->u.s.cipher_IV_ptr = qat_req->iv_paddr;
memcpy(qat_req->iv, req->info, AES_BLOCK_SIZE); memcpy(qat_req->iv, req->iv, AES_BLOCK_SIZE);
do { do {
ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg); ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
} while (ret == -EAGAIN && ctr++ < 10); } while (ret == -EAGAIN && ctr++ < 10);
...@@ -1097,26 +1098,26 @@ static int qat_alg_ablkcipher_encrypt(struct ablkcipher_request *req) ...@@ -1097,26 +1098,26 @@ static int qat_alg_ablkcipher_encrypt(struct ablkcipher_request *req)
return -EINPROGRESS; return -EINPROGRESS;
} }
static int qat_alg_ablkcipher_blk_encrypt(struct ablkcipher_request *req) static int qat_alg_skcipher_blk_encrypt(struct skcipher_request *req)
{ {
if (req->nbytes % AES_BLOCK_SIZE != 0) if (req->cryptlen % AES_BLOCK_SIZE != 0)
return -EINVAL; return -EINVAL;
return qat_alg_ablkcipher_encrypt(req); return qat_alg_skcipher_encrypt(req);
} }
static int qat_alg_ablkcipher_decrypt(struct ablkcipher_request *req) static int qat_alg_skcipher_decrypt(struct skcipher_request *req)
{ {
struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req); struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
struct crypto_tfm *tfm = crypto_ablkcipher_tfm(atfm); struct crypto_tfm *tfm = crypto_skcipher_tfm(stfm);
struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm); struct qat_alg_skcipher_ctx *ctx = crypto_tfm_ctx(tfm);
struct qat_crypto_request *qat_req = ablkcipher_request_ctx(req); struct qat_crypto_request *qat_req = skcipher_request_ctx(req);
struct icp_qat_fw_la_cipher_req_params *cipher_param; struct icp_qat_fw_la_cipher_req_params *cipher_param;
struct icp_qat_fw_la_bulk_req *msg; struct icp_qat_fw_la_bulk_req *msg;
struct device *dev = &GET_DEV(ctx->inst->accel_dev); struct device *dev = &GET_DEV(ctx->inst->accel_dev);
int ret, ctr = 0; int ret, ctr = 0;
if (req->nbytes == 0) if (req->cryptlen == 0)
return 0; return 0;
qat_req->iv = dma_alloc_coherent(dev, AES_BLOCK_SIZE, qat_req->iv = dma_alloc_coherent(dev, AES_BLOCK_SIZE,
...@@ -1133,17 +1134,17 @@ static int qat_alg_ablkcipher_decrypt(struct ablkcipher_request *req) ...@@ -1133,17 +1134,17 @@ static int qat_alg_ablkcipher_decrypt(struct ablkcipher_request *req)
msg = &qat_req->req; msg = &qat_req->req;
*msg = ctx->dec_fw_req; *msg = ctx->dec_fw_req;
qat_req->ablkcipher_ctx = ctx; qat_req->skcipher_ctx = ctx;
qat_req->ablkcipher_req = req; qat_req->skcipher_req = req;
qat_req->cb = qat_ablkcipher_alg_callback; qat_req->cb = qat_skcipher_alg_callback;
qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req; qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp; qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp; qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
cipher_param = (void *)&qat_req->req.serv_specif_rqpars; cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
cipher_param->cipher_length = req->nbytes; cipher_param->cipher_length = req->cryptlen;
cipher_param->cipher_offset = 0; cipher_param->cipher_offset = 0;
cipher_param->u.s.cipher_IV_ptr = qat_req->iv_paddr; cipher_param->u.s.cipher_IV_ptr = qat_req->iv_paddr;
memcpy(qat_req->iv, req->info, AES_BLOCK_SIZE); memcpy(qat_req->iv, req->iv, AES_BLOCK_SIZE);
do { do {
ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg); ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
} while (ret == -EAGAIN && ctr++ < 10); } while (ret == -EAGAIN && ctr++ < 10);
...@@ -1157,12 +1158,12 @@ static int qat_alg_ablkcipher_decrypt(struct ablkcipher_request *req) ...@@ -1157,12 +1158,12 @@ static int qat_alg_ablkcipher_decrypt(struct ablkcipher_request *req)
return -EINPROGRESS; return -EINPROGRESS;
} }
static int qat_alg_ablkcipher_blk_decrypt(struct ablkcipher_request *req) static int qat_alg_skcipher_blk_decrypt(struct skcipher_request *req)
{ {
if (req->nbytes % AES_BLOCK_SIZE != 0) if (req->cryptlen % AES_BLOCK_SIZE != 0)
return -EINVAL; return -EINVAL;
return qat_alg_ablkcipher_decrypt(req); return qat_alg_skcipher_decrypt(req);
} }
static int qat_alg_aead_init(struct crypto_aead *tfm, static int qat_alg_aead_init(struct crypto_aead *tfm,
enum icp_qat_hw_auth_algo hash, enum icp_qat_hw_auth_algo hash,
...@@ -1218,18 +1219,18 @@ static void qat_alg_aead_exit(struct crypto_aead *tfm) ...@@ -1218,18 +1219,18 @@ static void qat_alg_aead_exit(struct crypto_aead *tfm)
qat_crypto_put_instance(inst); qat_crypto_put_instance(inst);
} }
static int qat_alg_ablkcipher_init(struct crypto_tfm *tfm) static int qat_alg_skcipher_init_tfm(struct crypto_skcipher *tfm)
{ {
struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm); struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
tfm->crt_ablkcipher.reqsize = sizeof(struct qat_crypto_request); crypto_skcipher_set_reqsize(tfm, sizeof(struct qat_crypto_request));
ctx->tfm = tfm; ctx->tfm = tfm;
return 0; return 0;
} }
static void qat_alg_ablkcipher_exit(struct crypto_tfm *tfm) static void qat_alg_skcipher_exit_tfm(struct crypto_skcipher *tfm)
{ {
struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm); struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
struct qat_crypto_instance *inst = ctx->inst; struct qat_crypto_instance *inst = ctx->inst;
struct device *dev; struct device *dev;
...@@ -1308,92 +1309,75 @@ static struct aead_alg qat_aeads[] = { { ...@@ -1308,92 +1309,75 @@ static struct aead_alg qat_aeads[] = { {
.maxauthsize = SHA512_DIGEST_SIZE, .maxauthsize = SHA512_DIGEST_SIZE,
} }; } };
static struct crypto_alg qat_algs[] = { { static struct skcipher_alg qat_skciphers[] = { {
.cra_name = "cbc(aes)", .base.cra_name = "cbc(aes)",
.cra_driver_name = "qat_aes_cbc", .base.cra_driver_name = "qat_aes_cbc",
.cra_priority = 4001, .base.cra_priority = 4001,
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, .base.cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE, .base.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct qat_alg_ablkcipher_ctx), .base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx),
.cra_alignmask = 0, .base.cra_alignmask = 0,
.cra_type = &crypto_ablkcipher_type, .base.cra_module = THIS_MODULE,
.cra_module = THIS_MODULE,
.cra_init = qat_alg_ablkcipher_init, .init = qat_alg_skcipher_init_tfm,
.cra_exit = qat_alg_ablkcipher_exit, .exit = qat_alg_skcipher_exit_tfm,
.cra_u = { .setkey = qat_alg_skcipher_cbc_setkey,
.ablkcipher = { .decrypt = qat_alg_skcipher_blk_decrypt,
.setkey = qat_alg_ablkcipher_cbc_setkey, .encrypt = qat_alg_skcipher_blk_encrypt,
.decrypt = qat_alg_ablkcipher_blk_decrypt,
.encrypt = qat_alg_ablkcipher_blk_encrypt,
.min_keysize = AES_MIN_KEY_SIZE, .min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE, .ivsize = AES_BLOCK_SIZE,
},
},
}, { }, {
.cra_name = "ctr(aes)", .base.cra_name = "ctr(aes)",
.cra_driver_name = "qat_aes_ctr", .base.cra_driver_name = "qat_aes_ctr",
.cra_priority = 4001, .base.cra_priority = 4001,
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, .base.cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = 1, .base.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct qat_alg_ablkcipher_ctx), .base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx),
.cra_alignmask = 0, .base.cra_alignmask = 0,
.cra_type = &crypto_ablkcipher_type, .base.cra_module = THIS_MODULE,
.cra_module = THIS_MODULE,
.cra_init = qat_alg_ablkcipher_init, .init = qat_alg_skcipher_init_tfm,
.cra_exit = qat_alg_ablkcipher_exit, .exit = qat_alg_skcipher_exit_tfm,
.cra_u = { .setkey = qat_alg_skcipher_ctr_setkey,
.ablkcipher = { .decrypt = qat_alg_skcipher_decrypt,
.setkey = qat_alg_ablkcipher_ctr_setkey, .encrypt = qat_alg_skcipher_encrypt,
.decrypt = qat_alg_ablkcipher_decrypt,
.encrypt = qat_alg_ablkcipher_encrypt,
.min_keysize = AES_MIN_KEY_SIZE, .min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE, .ivsize = AES_BLOCK_SIZE,
},
},
}, { }, {
.cra_name = "xts(aes)", .base.cra_name = "xts(aes)",
.cra_driver_name = "qat_aes_xts", .base.cra_driver_name = "qat_aes_xts",
.cra_priority = 4001, .base.cra_priority = 4001,
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, .base.cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE, .base.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct qat_alg_ablkcipher_ctx), .base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx),
.cra_alignmask = 0, .base.cra_alignmask = 0,
.cra_type = &crypto_ablkcipher_type, .base.cra_module = THIS_MODULE,
.cra_module = THIS_MODULE,
.cra_init = qat_alg_ablkcipher_init, .init = qat_alg_skcipher_init_tfm,
.cra_exit = qat_alg_ablkcipher_exit, .exit = qat_alg_skcipher_exit_tfm,
.cra_u = { .setkey = qat_alg_skcipher_xts_setkey,
.ablkcipher = { .decrypt = qat_alg_skcipher_blk_decrypt,
.setkey = qat_alg_ablkcipher_xts_setkey, .encrypt = qat_alg_skcipher_blk_encrypt,
.decrypt = qat_alg_ablkcipher_blk_decrypt,
.encrypt = qat_alg_ablkcipher_blk_encrypt,
.min_keysize = 2 * AES_MIN_KEY_SIZE, .min_keysize = 2 * AES_MIN_KEY_SIZE,
.max_keysize = 2 * AES_MAX_KEY_SIZE, .max_keysize = 2 * AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE, .ivsize = AES_BLOCK_SIZE,
},
},
} }; } };
int qat_algs_register(void) int qat_algs_register(void)
{ {
int ret = 0, i; int ret = 0;
mutex_lock(&algs_lock); mutex_lock(&algs_lock);
if (++active_devs != 1) if (++active_devs != 1)
goto unlock; goto unlock;
for (i = 0; i < ARRAY_SIZE(qat_algs); i++) ret = crypto_register_skciphers(qat_skciphers,
qat_algs[i].cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC; ARRAY_SIZE(qat_skciphers));
ret = crypto_register_algs(qat_algs, ARRAY_SIZE(qat_algs));
if (ret) if (ret)
goto unlock; goto unlock;
for (i = 0; i < ARRAY_SIZE(qat_aeads); i++)
qat_aeads[i].base.cra_flags = CRYPTO_ALG_ASYNC;
ret = crypto_register_aeads(qat_aeads, ARRAY_SIZE(qat_aeads)); ret = crypto_register_aeads(qat_aeads, ARRAY_SIZE(qat_aeads));
if (ret) if (ret)
goto unreg_algs; goto unreg_algs;
...@@ -1403,7 +1387,7 @@ int qat_algs_register(void) ...@@ -1403,7 +1387,7 @@ int qat_algs_register(void)
return ret; return ret;
unreg_algs: unreg_algs:
crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs)); crypto_unregister_skciphers(qat_skciphers, ARRAY_SIZE(qat_skciphers));
goto unlock; goto unlock;
} }
...@@ -1414,7 +1398,7 @@ void qat_algs_unregister(void) ...@@ -1414,7 +1398,7 @@ void qat_algs_unregister(void)
goto unlock; goto unlock;
crypto_unregister_aeads(qat_aeads, ARRAY_SIZE(qat_aeads)); crypto_unregister_aeads(qat_aeads, ARRAY_SIZE(qat_aeads));
crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs)); crypto_unregister_skciphers(qat_skciphers, ARRAY_SIZE(qat_skciphers));
unlock: unlock:
mutex_unlock(&algs_lock); mutex_unlock(&algs_lock);
......
...@@ -79,11 +79,11 @@ struct qat_crypto_request { ...@@ -79,11 +79,11 @@ struct qat_crypto_request {
struct icp_qat_fw_la_bulk_req req; struct icp_qat_fw_la_bulk_req req;
union { union {
struct qat_alg_aead_ctx *aead_ctx; struct qat_alg_aead_ctx *aead_ctx;
struct qat_alg_ablkcipher_ctx *ablkcipher_ctx; struct qat_alg_skcipher_ctx *skcipher_ctx;
}; };
union { union {
struct aead_request *aead_req; struct aead_request *aead_req;
struct ablkcipher_request *ablkcipher_req; struct skcipher_request *skcipher_req;
}; };
struct qat_crypto_request_buffs buf; struct qat_crypto_request_buffs buf;
void (*cb)(struct icp_qat_fw_la_resp *resp, void (*cb)(struct icp_qat_fw_la_resp *resp,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment