Commit e4e37acc authored by Marco Chiappero, committed by Herbert Xu

crypto: qat - update IV in software

Do IV update calculations in software for AES-CBC and AES-CTR.

This allows the IV to be embedded in the request descriptor and removes
the allocation of the IV buffer from the data path.

In addition, this change enables support for QAT devices that are not
capable of updating the IV buffer when performing an AES-CBC or AES-CTR
operation.
Signed-off-by: Marco Chiappero <marco.chiappero@intel.com>
Co-developed-by: Mateusz Polrola <mateuszx.potrola@intel.com>
Signed-off-by: Mateusz Polrola <mateuszx.potrola@intel.com>
Co-developed-by: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
Signed-off-by: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
Reviewed-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
Tested-by: Indrasena Reddy Gali <indrasena.reddygali@intel.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent c98e2330
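
The IV handling that this patch moves into software follows two rules. In CTR
mode the 16-byte IV is a 128-bit big-endian counter that advances by one for
every AES block processed, with the carry propagated from the low into the
high 64 bits. In CBC mode the next IV is simply the last ciphertext block of
the operation. Below is a minimal standalone sketch of both rules in plain C;
the helper names are illustrative and are not part of the driver.

	#include <stddef.h>
	#include <stdint.h>
	#include <string.h>

	#define AES_BLOCK_SIZE 16

	/* CTR mode: treat the IV as a 128-bit big-endian counter and
	 * advance it by the number of AES blocks consumed, carrying from
	 * the low into the high 64 bits, as qat_alg_update_iv_ctr_mode()
	 * does in the patch.
	 */
	static void ctr_iv_advance(uint8_t iv[AES_BLOCK_SIZE], size_t cryptlen)
	{
		uint64_t hi = 0, lo = 0, lo_prev, blocks;
		size_t i;

		for (i = 0; i < 8; i++) {	/* load big-endian halves */
			hi = (hi << 8) | iv[i];
			lo = (lo << 8) | iv[8 + i];
		}

		blocks = (cryptlen + AES_BLOCK_SIZE - 1) / AES_BLOCK_SIZE;
		lo_prev = lo;
		lo += blocks;
		if (lo < lo_prev)		/* carry into the high half */
			hi++;

		for (i = 0; i < 8; i++) {	/* store big-endian halves */
			iv[7 - i] = (uint8_t)(hi >> (8 * i));
			iv[15 - i] = (uint8_t)(lo >> (8 * i));
		}
	}

	/* CBC mode: the next IV is the last ciphertext block, i.e. the last
	 * block written on encryption or the last block read on decryption,
	 * as qat_alg_update_iv_cbc_mode() does via scatterwalk_map_and_copy().
	 */
	static void cbc_iv_advance(uint8_t iv[AES_BLOCK_SIZE],
				   const uint8_t *ciphertext, size_t cryptlen)
	{
		memcpy(iv, ciphertext + cryptlen - AES_BLOCK_SIZE,
		       AES_BLOCK_SIZE);
	}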
--- a/drivers/crypto/qat/qat_common/qat_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_algs.c
@@ -11,6 +11,7 @@
 #include <crypto/hmac.h>
 #include <crypto/algapi.h>
 #include <crypto/authenc.h>
+#include <crypto/scatterwalk.h>
 #include <crypto/xts.h>
 #include <linux/dma-mapping.h>
 #include "adf_accel_devices.h"
@@ -90,6 +91,7 @@ struct qat_alg_skcipher_ctx {
 	struct qat_crypto_instance *inst;
 	struct crypto_skcipher *ftfm;
 	bool fallback;
+	int mode;
 };
 
 static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg)
@@ -214,24 +216,7 @@ static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
 	return 0;
 }
 
-static void qat_alg_init_hdr_iv_updt(struct icp_qat_fw_comn_req_hdr *header)
-{
-	ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
-					   ICP_QAT_FW_CIPH_IV_64BIT_PTR);
-	ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
-				       ICP_QAT_FW_LA_UPDATE_STATE);
-}
-
-static void qat_alg_init_hdr_no_iv_updt(struct icp_qat_fw_comn_req_hdr *header)
-{
-	ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
-					   ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
-	ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
-				       ICP_QAT_FW_LA_NO_UPDATE_STATE);
-}
-
-static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header,
-				    int aead)
+static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header)
 {
 	header->hdr_flags =
 		ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
@@ -241,12 +226,12 @@ static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header,
 					    QAT_COMN_PTR_TYPE_SGL);
 	ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
 				  ICP_QAT_FW_LA_PARTIAL_NONE);
-	if (aead)
-		qat_alg_init_hdr_no_iv_updt(header);
-	else
-		qat_alg_init_hdr_iv_updt(header);
+	ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
+					   ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
 	ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
 				ICP_QAT_FW_LA_NO_PROTO);
+	ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
+				       ICP_QAT_FW_LA_NO_UPDATE_STATE);
 }
 
 static int qat_alg_aead_init_enc_session(struct crypto_aead *aead_tfm,
@@ -281,7 +266,7 @@ static int qat_alg_aead_init_enc_session(struct crypto_aead *aead_tfm,
 		return -EFAULT;
 
 	/* Request setup */
-	qat_alg_init_common_hdr(header, 1);
+	qat_alg_init_common_hdr(header);
 	header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER_HASH;
 	ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
 					   ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
@@ -368,7 +353,7 @@ static int qat_alg_aead_init_dec_session(struct crypto_aead *aead_tfm,
 		return -EFAULT;
 
 	/* Request setup */
-	qat_alg_init_common_hdr(header, 1);
+	qat_alg_init_common_hdr(header);
 	header->service_cmd_id = ICP_QAT_FW_LA_CMD_HASH_CIPHER;
 	ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
 					   ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
@@ -432,7 +417,7 @@ static void qat_alg_skcipher_init_com(struct qat_alg_skcipher_ctx *ctx,
 	struct icp_qat_fw_cipher_cd_ctrl_hdr *cd_ctrl = (void *)&req->cd_ctrl;
 
 	memcpy(cd->aes.key, key, keylen);
-	qat_alg_init_common_hdr(header, 0);
+	qat_alg_init_common_hdr(header);
 	header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER;
 	cd_pars->u.s.content_desc_params_sz =
 		sizeof(struct icp_qat_hw_cipher_algo_blk) >> 3;
@@ -787,6 +772,61 @@ static void qat_aead_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
 	areq->base.complete(&areq->base, res);
 }
 
+static void qat_alg_update_iv_ctr_mode(struct qat_crypto_request *qat_req)
+{
+	struct skcipher_request *sreq = qat_req->skcipher_req;
+	u64 iv_lo_prev;
+	u64 iv_lo;
+	u64 iv_hi;
+
+	memcpy(qat_req->iv, sreq->iv, AES_BLOCK_SIZE);
+
+	iv_lo = be64_to_cpu(qat_req->iv_lo);
+	iv_hi = be64_to_cpu(qat_req->iv_hi);
+
+	iv_lo_prev = iv_lo;
+	iv_lo += DIV_ROUND_UP(sreq->cryptlen, AES_BLOCK_SIZE);
+	if (iv_lo < iv_lo_prev)
+		iv_hi++;
+
+	qat_req->iv_lo = cpu_to_be64(iv_lo);
+	qat_req->iv_hi = cpu_to_be64(iv_hi);
+}
+
+static void qat_alg_update_iv_cbc_mode(struct qat_crypto_request *qat_req)
+{
+	struct skcipher_request *sreq = qat_req->skcipher_req;
+	int offset = sreq->cryptlen - AES_BLOCK_SIZE;
+	struct scatterlist *sgl;
+
+	if (qat_req->encryption)
+		sgl = sreq->dst;
+	else
+		sgl = sreq->src;
+
+	scatterwalk_map_and_copy(qat_req->iv, sgl, offset, AES_BLOCK_SIZE, 0);
+}
+
+static void qat_alg_update_iv(struct qat_crypto_request *qat_req)
+{
+	struct qat_alg_skcipher_ctx *ctx = qat_req->skcipher_ctx;
+	struct device *dev = &GET_DEV(ctx->inst->accel_dev);
+
+	switch (ctx->mode) {
+	case ICP_QAT_HW_CIPHER_CTR_MODE:
+		qat_alg_update_iv_ctr_mode(qat_req);
+		break;
+	case ICP_QAT_HW_CIPHER_CBC_MODE:
+		qat_alg_update_iv_cbc_mode(qat_req);
+		break;
+	case ICP_QAT_HW_CIPHER_XTS_MODE:
+		break;
+	default:
+		dev_warn(dev, "Unsupported IV update for cipher mode %d\n",
+			 ctx->mode);
+	}
+}
+
 static void qat_skcipher_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
 				      struct qat_crypto_request *qat_req)
 {
@@ -794,16 +834,16 @@ static void qat_skcipher_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
 	struct qat_crypto_instance *inst = ctx->inst;
 	struct skcipher_request *sreq = qat_req->skcipher_req;
 	u8 stat_filed = qat_resp->comn_resp.comn_status;
-	struct device *dev = &GET_DEV(ctx->inst->accel_dev);
 	int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_filed);
 
 	qat_alg_free_bufl(inst, qat_req);
 	if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
 		res = -EINVAL;
 
+	if (qat_req->encryption)
+		qat_alg_update_iv(qat_req);
+
 	memcpy(sreq->iv, qat_req->iv, AES_BLOCK_SIZE);
-	dma_free_coherent(dev, AES_BLOCK_SIZE, qat_req->iv,
-			  qat_req->iv_paddr);
 
 	sreq->base.complete(&sreq->base, res);
 }
@@ -981,6 +1021,8 @@ static int qat_alg_skcipher_setkey(struct crypto_skcipher *tfm,
 {
 	struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
 
+	ctx->mode = mode;
+
 	if (ctx->enc_cd)
 		return qat_alg_skcipher_rekey(ctx, key, keylen, mode);
 	else
@@ -1035,23 +1077,14 @@ static int qat_alg_skcipher_encrypt(struct skcipher_request *req)
 	struct qat_crypto_request *qat_req = skcipher_request_ctx(req);
 	struct icp_qat_fw_la_cipher_req_params *cipher_param;
 	struct icp_qat_fw_la_bulk_req *msg;
-	struct device *dev = &GET_DEV(ctx->inst->accel_dev);
 	int ret, ctr = 0;
 
 	if (req->cryptlen == 0)
 		return 0;
 
-	qat_req->iv = dma_alloc_coherent(dev, AES_BLOCK_SIZE,
-					 &qat_req->iv_paddr, GFP_ATOMIC);
-	if (!qat_req->iv)
-		return -ENOMEM;
-
 	ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req);
-	if (unlikely(ret)) {
-		dma_free_coherent(dev, AES_BLOCK_SIZE, qat_req->iv,
-				  qat_req->iv_paddr);
+	if (unlikely(ret))
 		return ret;
-	}
 
 	msg = &qat_req->req;
 	*msg = ctx->enc_fw_req;
@@ -1061,19 +1094,18 @@ static int qat_alg_skcipher_encrypt(struct skcipher_request *req)
 	qat_req->req.comn_mid.opaque_data = (u64)(__force long)qat_req;
 	qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
 	qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
+	qat_req->encryption = true;
 	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
 	cipher_param->cipher_length = req->cryptlen;
 	cipher_param->cipher_offset = 0;
-	cipher_param->u.s.cipher_IV_ptr = qat_req->iv_paddr;
-	memcpy(qat_req->iv, req->iv, AES_BLOCK_SIZE);
+	memcpy(cipher_param->u.cipher_IV_array, req->iv, AES_BLOCK_SIZE);
+
 	do {
 		ret = adf_send_message(ctx->inst->sym_tx, (u32 *)msg);
 	} while (ret == -EAGAIN && ctr++ < 10);
 
 	if (ret == -EAGAIN) {
 		qat_alg_free_bufl(ctx->inst, qat_req);
-		dma_free_coherent(dev, AES_BLOCK_SIZE, qat_req->iv,
-				  qat_req->iv_paddr);
 		return -EBUSY;
 	}
 	return -EINPROGRESS;
@@ -1113,23 +1145,14 @@ static int qat_alg_skcipher_decrypt(struct skcipher_request *req)
 	struct qat_crypto_request *qat_req = skcipher_request_ctx(req);
 	struct icp_qat_fw_la_cipher_req_params *cipher_param;
 	struct icp_qat_fw_la_bulk_req *msg;
-	struct device *dev = &GET_DEV(ctx->inst->accel_dev);
 	int ret, ctr = 0;
 
 	if (req->cryptlen == 0)
 		return 0;
 
-	qat_req->iv = dma_alloc_coherent(dev, AES_BLOCK_SIZE,
-					 &qat_req->iv_paddr, GFP_ATOMIC);
-	if (!qat_req->iv)
-		return -ENOMEM;
-
 	ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req);
-	if (unlikely(ret)) {
-		dma_free_coherent(dev, AES_BLOCK_SIZE, qat_req->iv,
-				  qat_req->iv_paddr);
+	if (unlikely(ret))
 		return ret;
-	}
 
 	msg = &qat_req->req;
 	*msg = ctx->dec_fw_req;
@@ -1139,19 +1162,20 @@ static int qat_alg_skcipher_decrypt(struct skcipher_request *req)
 	qat_req->req.comn_mid.opaque_data = (u64)(__force long)qat_req;
 	qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
 	qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
+	qat_req->encryption = false;
 	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
 	cipher_param->cipher_length = req->cryptlen;
 	cipher_param->cipher_offset = 0;
-	cipher_param->u.s.cipher_IV_ptr = qat_req->iv_paddr;
-	memcpy(qat_req->iv, req->iv, AES_BLOCK_SIZE);
+	memcpy(cipher_param->u.cipher_IV_array, req->iv, AES_BLOCK_SIZE);
+
+	qat_alg_update_iv(qat_req);
+
 	do {
 		ret = adf_send_message(ctx->inst->sym_tx, (u32 *)msg);
 	} while (ret == -EAGAIN && ctr++ < 10);
 
 	if (ret == -EAGAIN) {
 		qat_alg_free_bufl(ctx->inst, qat_req);
-		dma_free_coherent(dev, AES_BLOCK_SIZE, qat_req->iv,
-				  qat_req->iv_paddr);
 		return -EBUSY;
 	}
 	return -EINPROGRESS;
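
Note that the decrypt path calls qat_alg_update_iv() before the request is
sent to the device, while the encrypt path updates the IV in the completion
callback. The reason is that for CBC the next IV is the last ciphertext
block: on an in-place decrypt (dst == src) the device overwrites that block
with plaintext, so it must be captured before submission. A small standalone
sketch of the hazard, with made-up data:

	#include <stdio.h>
	#include <string.h>

	#define AES_BLOCK_SIZE 16

	int main(void)
	{
		unsigned char buf[4 * AES_BLOCK_SIZE];
		unsigned char next_iv[AES_BLOCK_SIZE];

		memset(buf, 0xc5, sizeof(buf));	/* pretend ciphertext */

		/* Correct order: capture the last ciphertext block first... */
		memcpy(next_iv, buf + sizeof(buf) - AES_BLOCK_SIZE,
		       AES_BLOCK_SIZE);

		/* ...because an in-place decrypt overwrites it with
		 * plaintext (memset stands in for the device operation).
		 */
		memset(buf, 0x00, sizeof(buf));

		printf("next IV first byte: 0x%02x\n", next_iv[0]); /* 0xc5 */
		return 0;
	}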
--- a/drivers/crypto/qat/qat_common/qat_crypto.h
+++ b/drivers/crypto/qat/qat_common/qat_crypto.h
@@ -3,6 +3,7 @@
 #ifndef _QAT_CRYPTO_INSTANCE_H_
 #define _QAT_CRYPTO_INSTANCE_H_
 
+#include <crypto/aes.h>
 #include <linux/list.h>
 #include <linux/slab.h>
 #include "adf_accel_devices.h"
@@ -44,8 +45,14 @@ struct qat_crypto_request {
 	struct qat_crypto_request_buffs buf;
 	void (*cb)(struct icp_qat_fw_la_resp *resp,
 		   struct qat_crypto_request *req);
-	void *iv;
-	dma_addr_t iv_paddr;
+	union {
+		struct {
+			__be64 iv_hi;
+			__be64 iv_lo;
+		};
+		u8 iv[AES_BLOCK_SIZE];
+	};
+	bool encryption;
 };
 
 #endif
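
The anonymous union added to struct qat_crypto_request lets the driver view
the same 16 bytes either as the raw IV buffer or as two 64-bit halves for the
CTR carry arithmetic. Below is a standalone illustration of that aliasing; it
is simplified in that plain uint64_t stands in for the kernel's __be64, so
the be64_to_cpu()/cpu_to_be64() conversions used by the patch are omitted:

	#include <inttypes.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	#define AES_BLOCK_SIZE 16

	/* Simplified mirror of the layout added to struct qat_crypto_request. */
	struct iv_words {
		union {
			struct {
				uint64_t iv_hi;	/* aliases iv[0..7] */
				uint64_t iv_lo;	/* aliases iv[8..15] */
			};
			uint8_t iv[AES_BLOCK_SIZE];
		};
	};

	int main(void)
	{
		struct iv_words w;

		memset(w.iv, 0, sizeof(w.iv));
		w.iv[15] = 0x2a;	/* least significant counter byte */

		/* The raw word is byte-order dependent, which is why the
		 * patch converts with be64_to_cpu() before arithmetic.
		 */
		printf("raw iv_lo word: 0x%016" PRIx64 "\n", w.iv_lo);
		return 0;
	}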