Commit 416d8220 authored by Zaibo Xu, committed by Herbert Xu

crypto: hisilicon - add HiSilicon SEC V2 driver

The SEC driver provides PCIe hardware device initialization and registers
the AES, SM4, and 3DES skcipher algorithms with the Crypto subsystem.
It uses the HiSilicon QM as the interface to the CPU.
Signed-off-by: Zaibo Xu <xuzaibo@huawei.com>
Signed-off-by: Longfang Liu <liulongfang@huawei.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent aee1f9f3
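Once loaded, the driver's algorithms are reached through the standard kernel crypto API rather than any driver-specific interface; the crypto core picks an implementation by priority, so the SEC hardware may or may not service a given request. Below is a minimal sketch, assuming one of the cipher/mode pairs listed in the Kconfig help ("cbc(aes)") and a caller-supplied DMA-able buffer; the function name and buffers are illustrative and not part of this commit.

#include <crypto/skcipher.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

/* Illustrative only: one-shot CBC-AES encryption through the crypto API. */
static int example_cbc_aes_encrypt(u8 *buf, unsigned int len,
				   const u8 *key, unsigned int keylen, u8 *iv)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int ret;

	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_skcipher_setkey(tfm, key, keylen);
	if (ret)
		goto out_free_tfm;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out_free_tfm;
	}

	sg_init_one(&sg, buf, len);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				      CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, &sg, &sg, len, iv);

	/* crypto_wait_req() turns the asynchronous completion into a sync call */
	ret = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

	skcipher_request_free(req);
out_free_tfm:
	crypto_free_skcipher(tfm);
	return ret;
}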
@@ -14,6 +14,22 @@ config CRYPTO_DEV_HISI_SEC
	  To compile this as a module, choose M here: the module
	  will be called hisi_sec.

config CRYPTO_DEV_HISI_SEC2
	tristate "Support for HiSilicon SEC2 crypto block cipher accelerator"
	select CRYPTO_BLKCIPHER
	select CRYPTO_ALGAPI
	select CRYPTO_LIB_DES
	select CRYPTO_DEV_HISI_QM
	depends on PCI && PCI_MSI
	depends on ARM64 || (COMPILE_TEST && 64BIT)
	help
	  Support for the HiSilicon SEC engine (version 2) in the crypto subsystem.
	  It provides AES, SM4, and 3DES algorithms with the ECB,
	  CBC, and XTS cipher modes.

	  To compile this as a module, choose M here: the module
	  will be called hisi_sec2.

config CRYPTO_DEV_HISI_QM
	tristate
	depends on ARM64 || COMPILE_TEST
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_CRYPTO_DEV_HISI_HPRE) += hpre/
obj-$(CONFIG_CRYPTO_DEV_HISI_SEC) += sec/
obj-$(CONFIG_CRYPTO_DEV_HISI_SEC2) += sec2/
obj-$(CONFIG_CRYPTO_DEV_HISI_QM) += hisi_qm.o
hisi_qm-objs = qm.o sgl.o
obj-$(CONFIG_CRYPTO_DEV_HISI_ZIP) += zip/
obj-$(CONFIG_CRYPTO_DEV_HISI_SEC2) += hisi_sec2.o
hisi_sec2-objs = sec_main.o sec_crypto.o
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2019 HiSilicon Limited. */
#ifndef __HISI_SEC_V2_H
#define __HISI_SEC_V2_H
#include <linux/list.h>
#include "../qm.h"
#include "sec_crypto.h"
/* Cipher resource per hardware SEC queue */
struct sec_cipher_res {
	u8 *c_ivin;
	dma_addr_t c_ivin_dma;
};

/* Cipher request private to the SEC driver */
struct sec_cipher_req {
	struct hisi_acc_hw_sgl *c_in;
	dma_addr_t c_in_dma;
	struct hisi_acc_hw_sgl *c_out;
	dma_addr_t c_out_dma;
	u8 *c_ivin;
	dma_addr_t c_ivin_dma;
	struct skcipher_request *sk_req;
	u32 c_len;
	bool encrypt;
};

/* SEC request issued by the Crypto subsystem */
struct sec_req {
	struct sec_sqe sec_sqe;
	struct sec_ctx *ctx;
	struct sec_qp_ctx *qp_ctx;

	/* Only cipher is supported at present */
	struct sec_cipher_req c_req;
	int err_type;
	int req_id;

	/* Status of the SEC request */
	int fake_busy;
};
/**
 * struct sec_req_op - Operations for SEC request
 * @get_res: Get resources for TFM on the SEC device
 * @resource_alloc: Allocate resources for queue context on the SEC device
 * @resource_free: Free resources for queue context on the SEC device
 * @buf_map: DMA map the SGL buffers of the request
 * @buf_unmap: DMA unmap the SGL buffers of the request
 * @do_transfer: Copy request data (such as the IV) into the mapped resources
 * @bd_fill: Fill the SEC queue BD
 * @bd_send: Send the SEC BD into the hardware queue
 * @callback: Completion callback for the request
 * @process: Main processing logic of the skcipher request
 */
struct sec_req_op {
	int (*get_res)(struct sec_ctx *ctx, struct sec_req *req);
	int (*resource_alloc)(struct sec_ctx *ctx, struct sec_qp_ctx *qp_ctx);
	void (*resource_free)(struct sec_ctx *ctx, struct sec_qp_ctx *qp_ctx);
	int (*buf_map)(struct sec_ctx *ctx, struct sec_req *req);
	void (*buf_unmap)(struct sec_ctx *ctx, struct sec_req *req);
	void (*do_transfer)(struct sec_ctx *ctx, struct sec_req *req);
	int (*bd_fill)(struct sec_ctx *ctx, struct sec_req *req);
	int (*bd_send)(struct sec_ctx *ctx, struct sec_req *req);
	void (*callback)(struct sec_ctx *ctx, struct sec_req *req);
	int (*process)(struct sec_ctx *ctx, struct sec_req *req);
};
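/*
 * Not part of this header: a minimal sketch of how a submit path might walk
 * a struct sec_req_op table in order.  The driver's real logic lives in
 * sec_crypto.c (collapsed in this view); example_submit() is hypothetical
 * and only illustrates the intended call sequence.
 */
static inline int example_submit(struct sec_ctx *ctx, struct sec_req *req)
{
	const struct sec_req_op *op = ctx->req_op;
	int ret;

	ret = op->get_res(ctx, req);	/* grab per-request resources, e.g. the IV buffer */
	if (ret)
		return ret;

	ret = op->buf_map(ctx, req);	/* DMA-map the source/destination SGLs */
	if (ret)
		return ret;

	op->do_transfer(ctx, req);	/* copy request data such as the IV */

	ret = op->bd_fill(ctx, req);	/* build the BD describing the operation */
	if (ret)
		goto err_unmap;

	ret = op->bd_send(ctx, req);	/* queue the BD on the hardware queue */
	if (ret)
		goto err_unmap;

	return 0;			/* op->callback() runs on completion */

err_unmap:
	op->buf_unmap(ctx, req);
	return ret;
}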
/* SEC cipher context holding the cipher-related data */
struct sec_cipher_ctx {
	u8 *c_key;
	dma_addr_t c_key_dma;
	sector_t iv_offset;
	u32 c_gran_size;
	u32 ivsize;
	u8 c_mode;
	u8 c_alg;
	u8 c_key_len;
};

/* SEC queue context holding the queue-related data */
struct sec_qp_ctx {
	struct hisi_qp *qp;
	struct sec_req **req_list;
	struct idr req_idr;
	void *alg_meta_data;
	struct sec_ctx *ctx;
	struct mutex req_lock;
	struct hisi_acc_sgl_pool *c_in_pool;
	struct hisi_acc_sgl_pool *c_out_pool;
	atomic_t pending_reqs;
};

/* SEC crypto TFM context holding the queue, cipher, and other related data */
struct sec_ctx {
	struct sec_qp_ctx *qp_ctx;
	struct sec_dev *sec;
	const struct sec_req_op *req_op;

	/* Half of the queues are used for encryption, the other half for decryption */
	u32 hlf_q_num;

	/* Threshold for fake busy: once reached, -EBUSY is returned to the user */
	u32 fake_req_limit;

	/* Current cyclic index used to select a queue for encryption */
	atomic_t enc_qcyclic;

	/* Current cyclic index used to select a queue for decryption */
	atomic_t dec_qcyclic;

	struct sec_cipher_ctx c_ctx;
};
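/*
 * Illustrative only (not part of this header): one way the cyclic counters
 * above can spread requests across the queue contexts, with the first
 * hlf_q_num queues serving encryption and the rest decryption.
 * example_select_queue() is hypothetical and assumes ctx->qp_ctx points to
 * an array of 2 * hlf_q_num entries.
 */
static inline struct sec_qp_ctx *example_select_queue(struct sec_ctx *ctx,
						      bool encrypt)
{
	u32 idx;

	if (encrypt)
		idx = atomic_inc_return(&ctx->enc_qcyclic) % ctx->hlf_q_num;
	else
		idx = ctx->hlf_q_num +
		      atomic_inc_return(&ctx->dec_qcyclic) % ctx->hlf_q_num;

	return &ctx->qp_ctx[idx];
}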
enum sec_endian {
	SEC_LE = 0,
	SEC_32BE,
	SEC_64BE
};

struct sec_dev {
	struct hisi_qm qm;
	struct list_head list;
	u32 ctx_q_num;
	unsigned long status;
};

struct sec_dev *sec_find_device(int node);
int sec_register_to_crypto(void);
void sec_unregister_from_crypto(void);
#endif
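sec_find_device() above lets the crypto layer pick a SEC device appropriate for the caller's NUMA node before setting up queues for a TFM. A minimal, hypothetical sketch of that lookup follows; the helper name is an assumption and the selection policy inside sec_find_device() is up to the driver.

#include <linux/topology.h>

/* Hypothetical helper (not in this commit): pick a SEC device near the caller. */
static struct sec_dev *example_get_sec_dev(void)
{
	/* numa_node_id() gives the node of the CPU issuing the request */
	return sec_find_device(numa_node_id());
}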
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2019 HiSilicon Limited. */
#ifndef __HISI_SEC_V2_CRYPTO_H
#define __HISI_SEC_V2_CRYPTO_H
#define SEC_IV_SIZE 24
#define SEC_MAX_KEY_SIZE 64
#define SEC_COMM_SCENE 0
enum sec_calg {
	SEC_CALG_3DES = 0x1,
	SEC_CALG_AES = 0x2,
	SEC_CALG_SM4 = 0x3,
};

enum sec_cmode {
	SEC_CMODE_ECB = 0x0,
	SEC_CMODE_CBC = 0x1,
	SEC_CMODE_CTR = 0x4,
	SEC_CMODE_XTS = 0x7,
};

enum sec_ckey_type {
	SEC_CKEY_128BIT = 0x0,
	SEC_CKEY_192BIT = 0x1,
	SEC_CKEY_256BIT = 0x2,
	SEC_CKEY_3DES_3KEY = 0x1,
	SEC_CKEY_3DES_2KEY = 0x3,
};

enum sec_bd_type {
	SEC_BD_TYPE1 = 0x1,
	SEC_BD_TYPE2 = 0x2,
};

enum sec_cipher_dir {
	SEC_CIPHER_ENC = 0x1,
	SEC_CIPHER_DEC = 0x2,
};

enum sec_addr_type {
	SEC_PBUF = 0x0,
	SEC_SGL = 0x1,
	SEC_PRP = 0x2,
};
struct sec_sqe_type2 {
	/*
	 * mac_len: 0~5 bits
	 * a_key_len: 6~10 bits
	 * a_alg: 11~16 bits
	 */
	__le32 mac_key_alg;

	/*
	 * c_icv_len: 0~5 bits
	 * c_width: 6~8 bits
	 * c_key_len: 9~11 bits
	 * c_mode: 12~15 bits
	 */
	__le16 icvw_kmode;

	/* c_alg: 0~3 bits */
	__u8 c_alg;
	__u8 rsvd4;

	/*
	 * a_len: 0~23 bits
	 * iv_offset_l: 24~31 bits
	 */
	__le32 alen_ivllen;

	/*
	 * c_len: 0~23 bits
	 * iv_offset_h: 24~31 bits
	 */
	__le32 clen_ivhlen;

	__le16 auth_src_offset;
	__le16 cipher_src_offset;
	__le16 cs_ip_header_offset;
	__le16 cs_udp_header_offset;
	__le16 pass_word_len;
	__le16 dk_len;
	__u8 salt3;
	__u8 salt2;
	__u8 salt1;
	__u8 salt0;

	__le16 tag;
	__le16 rsvd5;

	/*
	 * c_pad_type: 0~3 bits
	 * c_pad_len: 4~11 bits
	 * c_pad_data_type: 12~15 bits
	 */
	__le16 cph_pad;

	/* c_pad_len_field: 0~1 bits */
	__le16 c_pad_len_field;

	__le64 long_a_data_len;
	__le64 a_ivin_addr;
	__le64 a_key_addr;
	__le64 mac_addr;
	__le64 c_ivin_addr;
	__le64 c_key_addr;
	__le64 data_src_addr;
	__le64 data_dst_addr;

	/*
	 * done: 0 bit
	 * icv: 1~3 bits
	 * csc: 4~6 bits
	 * flag: 7~10 bits
	 * dif_check: 11~13 bits
	 */
	__le16 done_flag;

	__u8 error_type;
	__u8 warning_type;
	__u8 mac_i3;
	__u8 mac_i2;
	__u8 mac_i1;
	__u8 mac_i0;
	__le16 check_sum_i;
	__u8 tls_pad_len_i;
	__u8 rsvd12;
	__le32 counter;
};
struct sec_sqe {
	/*
	 * type: 0~3 bits
	 * cipher: 4~5 bits
	 * auth: 6~7 bits
	 */
	__u8 type_cipher_auth;

	/*
	 * seq: 0 bit
	 * de: 1~2 bits
	 * scene: 3~6 bits
	 * src_addr_type: 7 bit, together with bits 0~1 of sdm_addr_type
	 */
	__u8 sds_sa_type;

	/*
	 * src_addr_type: 0~1 bits, not used now;
	 * set this field if PRP is supported, otherwise set it to zero.
	 * dst_addr_type: 2~4 bits
	 * mac_addr_type: 5~7 bits
	 */
	__u8 sdm_addr_type;
	__u8 rsvd0;

	/*
	 * nonce_len(type2): 0~3 bits
	 * huk(type2): 4 bit
	 * key_s(type2): 5 bit
	 * ci_gen: 6~7 bits
	 */
	__u8 huk_key_ci;

	/*
	 * ai_gen: 0~1 bits
	 * a_pad(type2): 2~3 bits
	 * c_s(type2): 4~5 bits
	 */
	__u8 ai_apd_cs;

	/*
	 * rhf(type2): 0 bit
	 * c_key_type: 1~2 bits
	 * a_key_type: 3~4 bits
	 * write_frame_len(type2): 5~7 bits
	 */
	__u8 rca_key_frm;

	/*
	 * cal_iv_addr_en(type2): 0 bit
	 * tls_up(type2): 1 bit
	 * inveld: 7 bit
	 */
	__u8 iv_tls_ld;

	/* Only the type2 BD is used at present */
	struct sec_sqe_type2 type2;
};
int sec_register_to_crypto(void);
void sec_unregister_from_crypto(void);
#endif
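The bit-position comments above are enough to assemble a basic type2 cipher BD. The following is a minimal sketch, not the driver's sec_crypto.c logic: the shift constants are derived from the comments (cipher in bits 4~5 of type_cipher_auth, c_key_len in bits 9~11 and c_mode in bits 12~15 of icvw_kmode), and the AES-128-CBC parameters and DMA addresses are placeholders chosen for the example.

#include <linux/bits.h>
#include <linux/string.h>

#include "sec_crypto.h"

#define EX_CIPHER_OFFSET	4	/* cipher: bits 4~5 of type_cipher_auth */
#define EX_CKEY_OFFSET		9	/* c_key_len: bits 9~11 of icvw_kmode */
#define EX_CMODE_OFFSET		12	/* c_mode: bits 12~15 of icvw_kmode */

/* Illustrative only: fill a minimal type2 BD for an AES-128-CBC encryption. */
static void example_fill_cipher_bd(struct sec_sqe *sqe, dma_addr_t key_dma,
				   dma_addr_t iv_dma, dma_addr_t src_dma,
				   dma_addr_t dst_dma, u32 len)
{
	memset(sqe, 0, sizeof(*sqe));

	/* BD type in bits 0~3, cipher direction in bits 4~5 */
	sqe->type_cipher_auth = SEC_BD_TYPE2 |
				(SEC_CIPHER_ENC << EX_CIPHER_OFFSET);

	sqe->type2.c_alg = SEC_CALG_AES;
	sqe->type2.icvw_kmode = cpu_to_le16((SEC_CMODE_CBC << EX_CMODE_OFFSET) |
					    (SEC_CKEY_128BIT << EX_CKEY_OFFSET));

	sqe->type2.c_key_addr = cpu_to_le64(key_dma);
	sqe->type2.c_ivin_addr = cpu_to_le64(iv_dma);
	sqe->type2.data_src_addr = cpu_to_le64(src_dma);
	sqe->type2.data_dst_addr = cpu_to_le64(dst_dma);

	/* c_len occupies bits 0~23 of clen_ivhlen */
	sqe->type2.clen_ivhlen = cpu_to_le32(len & GENMASK(23, 0));
}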