Commit 63893811 authored by Gilad Ben-Yossef, committed by Herbert Xu

crypto: ccree - add ahash support

Add CryptoCell asynchronous hash (ahash) and HMAC support.
Signed-off-by: Gilad Ben-Yossef <gilad@benyossef.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent 63ee04c8
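For context, the asynchronous hash and HMAC transformations registered by this commit are consumed through the kernel's generic ahash API rather than called directly. Below is a minimal, illustrative sketch of such a caller; the helper name and the "sha256" algorithm string are assumptions made for illustration and are not part of this commit, and the data buffer must be DMA-addressable (not on the stack).

/*
 * Illustrative only: a minimal ahash consumer using the generic kernel
 * crypto API. The helper name and "sha256" are assumptions, not part of
 * this commit.
 */
#include <crypto/hash.h>
#include <linux/scatterlist.h>
#include <linux/err.h>

static int example_ahash_sha256(const u8 *data, unsigned int len, u8 *digest)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int rc;

	tfm = crypto_alloc_ahash("sha256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		crypto_free_ahash(tfm);
		return -ENOMEM;
	}

	sg_init_one(&sg, data, len);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				   CRYPTO_TFM_REQ_MAY_SLEEP,
				   crypto_req_done, &wait);
	ahash_request_set_crypt(req, &sg, digest, len);

	/* digest() runs init/update/final as one asynchronous operation */
	rc = crypto_wait_req(crypto_ahash_digest(req), &wait);

	ahash_request_free(req);
	crypto_free_ahash(tfm);
	return rc;
}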
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_CRYPTO_DEV_CCREE) := ccree.o
-ccree-y := cc_driver.o cc_buffer_mgr.o cc_request_mgr.o cc_cipher.o cc_ivgen.o cc_sram_mgr.o
+ccree-y := cc_driver.o cc_buffer_mgr.o cc_request_mgr.o cc_cipher.o cc_hash.o cc_ivgen.o cc_sram_mgr.o
ccree-$(CONFIG_DEBUG_FS) += cc_debugfs.o
ccree-$(CONFIG_PM) += cc_pm.o
@@ -9,6 +9,7 @@
#include "cc_buffer_mgr.h"
#include "cc_lli_defs.h"
#include "cc_cipher.h"
#include "cc_hash.h"
enum dma_buffer_type {
DMA_NULL_TYPE = -1,
@@ -348,6 +349,30 @@ static int cc_map_sg(struct device *dev, struct scatterlist *sg,
return 0;
}
static int cc_set_hash_buf(struct device *dev, struct ahash_req_ctx *areq_ctx,
u8 *curr_buff, u32 curr_buff_cnt,
struct buffer_array *sg_data)
{
dev_dbg(dev, " handle curr buff %x set to DLLI\n", curr_buff_cnt);
/* create sg for the current buffer */
sg_init_one(areq_ctx->buff_sg, curr_buff, curr_buff_cnt);
if (dma_map_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE) != 1) {
dev_err(dev, "dma_map_sg() src buffer failed\n");
return -ENOMEM;
}
dev_dbg(dev, "Mapped curr_buff: dma_address=%pad page=%p addr=%pK offset=%u length=%u\n",
&sg_dma_address(areq_ctx->buff_sg), sg_page(areq_ctx->buff_sg),
sg_virt(areq_ctx->buff_sg), areq_ctx->buff_sg->offset,
areq_ctx->buff_sg->length);
areq_ctx->data_dma_buf_type = CC_DMA_BUF_DLLI;
areq_ctx->curr_sg = areq_ctx->buff_sg;
areq_ctx->in_nents = 0;
/* prepare for case of MLLI */
cc_add_sg_entry(dev, sg_data, 1, areq_ctx->buff_sg, curr_buff_cnt, 0,
false, NULL);
return 0;
}
void cc_unmap_cipher_request(struct device *dev, void *ctx,
unsigned int ivsize, struct scatterlist *src,
struct scatterlist *dst)
@@ -472,6 +497,238 @@ int cc_map_cipher_request(struct cc_drvdata *drvdata, void *ctx,
return rc;
}
int cc_map_hash_request_final(struct cc_drvdata *drvdata, void *ctx,
struct scatterlist *src, unsigned int nbytes,
bool do_update, gfp_t flags)
{
struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
struct device *dev = drvdata_to_dev(drvdata);
u8 *curr_buff = cc_hash_buf(areq_ctx);
u32 *curr_buff_cnt = cc_hash_buf_cnt(areq_ctx);
struct mlli_params *mlli_params = &areq_ctx->mlli_params;
struct buffer_array sg_data;
struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
u32 dummy = 0;
u32 mapped_nents = 0;
dev_dbg(dev, "final params : curr_buff=%pK curr_buff_cnt=0x%X nbytes = 0x%X src=%pK curr_index=%u\n",
curr_buff, *curr_buff_cnt, nbytes, src, areq_ctx->buff_index);
/* Init the type of the dma buffer */
areq_ctx->data_dma_buf_type = CC_DMA_BUF_NULL;
mlli_params->curr_pool = NULL;
sg_data.num_of_buffers = 0;
areq_ctx->in_nents = 0;
if (nbytes == 0 && *curr_buff_cnt == 0) {
/* nothing to do */
return 0;
}
/* TODO: copy the data directly when the existing buffer is large enough for the whole operation */
/* map the previous buffer */
if (*curr_buff_cnt) {
if (cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt,
&sg_data)) {
return -ENOMEM;
}
}
if (src && nbytes > 0 && do_update) {
if (cc_map_sg(dev, src, nbytes, DMA_TO_DEVICE,
&areq_ctx->in_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
&dummy, &mapped_nents)) {
goto unmap_curr_buff;
}
if (src && mapped_nents == 1 &&
areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) {
memcpy(areq_ctx->buff_sg, src,
sizeof(struct scatterlist));
areq_ctx->buff_sg->length = nbytes;
areq_ctx->curr_sg = areq_ctx->buff_sg;
areq_ctx->data_dma_buf_type = CC_DMA_BUF_DLLI;
} else {
areq_ctx->data_dma_buf_type = CC_DMA_BUF_MLLI;
}
}
/* build MLLI */
if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_MLLI) {
mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
/* add the src data to the sg_data */
cc_add_sg_entry(dev, &sg_data, areq_ctx->in_nents, src, nbytes,
0, true, &areq_ctx->mlli_nents);
if (cc_generate_mlli(dev, &sg_data, mlli_params, flags))
goto fail_unmap_din;
}
/* change the buffer index for the unmap function */
areq_ctx->buff_index = (areq_ctx->buff_index ^ 1);
dev_dbg(dev, "areq_ctx->data_dma_buf_type = %s\n",
cc_dma_buf_type(areq_ctx->data_dma_buf_type));
return 0;
fail_unmap_din:
dma_unmap_sg(dev, src, areq_ctx->in_nents, DMA_TO_DEVICE);
unmap_curr_buff:
if (*curr_buff_cnt)
dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
return -ENOMEM;
}
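The mapping helpers in this file pick between two DMA descriptor layouts: data that DMA-maps to a single scatterlist entry, with no previously staged buffer, is handed to the hardware directly as a DLLI, while anything larger or fragmented goes through an MLLI table built from the mlli_buffs_pool. A self-contained sketch of that decision follows; the helper name is hypothetical and not part of this commit.

/*
 * Illustrative only: the descriptor-type choice made by the hash mapping
 * helpers above. A single mapped entry with no staged data can be
 * described directly (DLLI); everything else needs an MLLI table.
 * The helper name is hypothetical.
 */
static enum cc_req_dma_buf_type cc_pick_buf_type(u32 mapped_nents,
						 bool have_staged_buf)
{
	if (mapped_nents == 1 && !have_staged_buf)
		return CC_DMA_BUF_DLLI;

	return CC_DMA_BUF_MLLI;
}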
int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx,
struct scatterlist *src, unsigned int nbytes,
unsigned int block_size, gfp_t flags)
{
struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
struct device *dev = drvdata_to_dev(drvdata);
u8 *curr_buff = cc_hash_buf(areq_ctx);
u32 *curr_buff_cnt = cc_hash_buf_cnt(areq_ctx);
u8 *next_buff = cc_next_buf(areq_ctx);
u32 *next_buff_cnt = cc_next_buf_cnt(areq_ctx);
struct mlli_params *mlli_params = &areq_ctx->mlli_params;
unsigned int update_data_len;
u32 total_in_len = nbytes + *curr_buff_cnt;
struct buffer_array sg_data;
struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
unsigned int swap_index = 0;
u32 dummy = 0;
u32 mapped_nents = 0;
dev_dbg(dev, " update params : curr_buff=%pK curr_buff_cnt=0x%X nbytes=0x%X src=%pK curr_index=%u\n",
curr_buff, *curr_buff_cnt, nbytes, src, areq_ctx->buff_index);
/* Init the type of the dma buffer */
areq_ctx->data_dma_buf_type = CC_DMA_BUF_NULL;
mlli_params->curr_pool = NULL;
areq_ctx->curr_sg = NULL;
sg_data.num_of_buffers = 0;
areq_ctx->in_nents = 0;
if (total_in_len < block_size) {
dev_dbg(dev, " less than one block: curr_buff=%pK *curr_buff_cnt=0x%X copy_to=%pK\n",
curr_buff, *curr_buff_cnt, &curr_buff[*curr_buff_cnt]);
areq_ctx->in_nents =
cc_get_sgl_nents(dev, src, nbytes, &dummy, NULL);
sg_copy_to_buffer(src, areq_ctx->in_nents,
&curr_buff[*curr_buff_cnt], nbytes);
*curr_buff_cnt += nbytes;
return 1;
}
/* Calculate the residue size */
*next_buff_cnt = total_in_len & (block_size - 1);
/* update data len */
update_data_len = total_in_len - *next_buff_cnt;
dev_dbg(dev, " temp length : *next_buff_cnt=0x%X update_data_len=0x%X\n",
*next_buff_cnt, update_data_len);
/* Copy the new residue to next buffer */
if (*next_buff_cnt) {
dev_dbg(dev, " handle residue: next buff %pK skip data %u residue %u\n",
next_buff, (update_data_len - *curr_buff_cnt),
*next_buff_cnt);
cc_copy_sg_portion(dev, next_buff, src,
(update_data_len - *curr_buff_cnt),
nbytes, CC_SG_TO_BUF);
/* change the buffer index for next operation */
swap_index = 1;
}
if (*curr_buff_cnt) {
if (cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt,
&sg_data)) {
return -ENOMEM;
}
/* change the buffer index for next operation */
swap_index = 1;
}
if (update_data_len > *curr_buff_cnt) {
if (cc_map_sg(dev, src, (update_data_len - *curr_buff_cnt),
DMA_TO_DEVICE, &areq_ctx->in_nents,
LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy,
&mapped_nents)) {
goto unmap_curr_buff;
}
if (mapped_nents == 1 &&
areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) {
/* only one entry in the SG and no previous data */
memcpy(areq_ctx->buff_sg, src,
sizeof(struct scatterlist));
areq_ctx->buff_sg->length = update_data_len;
areq_ctx->data_dma_buf_type = CC_DMA_BUF_DLLI;
areq_ctx->curr_sg = areq_ctx->buff_sg;
} else {
areq_ctx->data_dma_buf_type = CC_DMA_BUF_MLLI;
}
}
if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_MLLI) {
mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
/* add the src data to the sg_data */
cc_add_sg_entry(dev, &sg_data, areq_ctx->in_nents, src,
(update_data_len - *curr_buff_cnt), 0, true,
&areq_ctx->mlli_nents);
if (cc_generate_mlli(dev, &sg_data, mlli_params, flags))
goto fail_unmap_din;
}
areq_ctx->buff_index = (areq_ctx->buff_index ^ swap_index);
return 0;
fail_unmap_din:
dma_unmap_sg(dev, src, areq_ctx->in_nents, DMA_TO_DEVICE);
unmap_curr_buff:
if (*curr_buff_cnt)
dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
return -ENOMEM;
}
void cc_unmap_hash_request(struct device *dev, void *ctx,
struct scatterlist *src, bool do_revert)
{
struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
u32 *prev_len = cc_next_buf_cnt(areq_ctx);
/* In case a pool was set, a table was
 * allocated and should be released
 */
if (areq_ctx->mlli_params.curr_pool) {
dev_dbg(dev, "free MLLI buffer: dma=%pad virt=%pK\n",
&areq_ctx->mlli_params.mlli_dma_addr,
areq_ctx->mlli_params.mlli_virt_addr);
dma_pool_free(areq_ctx->mlli_params.curr_pool,
areq_ctx->mlli_params.mlli_virt_addr,
areq_ctx->mlli_params.mlli_dma_addr);
}
if (src && areq_ctx->in_nents) {
dev_dbg(dev, "Unmapped sg src: virt=%pK dma=%pad len=0x%X\n",
sg_virt(src), &sg_dma_address(src), sg_dma_len(src));
dma_unmap_sg(dev, src,
areq_ctx->in_nents, DMA_TO_DEVICE);
}
if (*prev_len) {
dev_dbg(dev, "Unmapped buffer: areq_ctx->buff_sg=%pK dma=%pad len 0x%X\n",
sg_virt(areq_ctx->buff_sg),
&sg_dma_address(areq_ctx->buff_sg),
sg_dma_len(areq_ctx->buff_sg));
dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
if (!do_revert) {
/* clean the previous data length for update
* operation
*/
*prev_len = 0;
} else {
areq_ctx->buff_index ^= 1;
}
}
}
int cc_buffer_mgr_init(struct cc_drvdata *drvdata)
{
struct buff_mgr_handle *buff_mgr_handle;
......
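In the update path above, input below a full hash block is simply appended to the current staging buffer and the function returns 1, so no DMA mapping is done; otherwise the accumulated length is split into a block-aligned part that is mapped for this update and a sub-block residue that is copied into the next staging buffer. The arithmetic of that split is shown below as a self-contained sketch; the helper name is hypothetical and block_size is assumed to be a power of two, as it is for the supported hashes.

/*
 * Illustrative only: the block/residue split performed by
 * cc_map_hash_request_update(). Assumes a power-of-two block_size;
 * the helper name is hypothetical.
 */
static inline void cc_split_update_len(u32 curr_buff_cnt, u32 nbytes,
				       u32 block_size,
				       u32 *update_data_len,
				       u32 *next_buff_cnt)
{
	u32 total_in_len = curr_buff_cnt + nbytes;

	/* sub-block residue staged for the next update call */
	*next_buff_cnt = total_in_len & (block_size - 1);
	/* block-aligned portion processed by this update */
	*update_data_len = total_in_len - *next_buff_cnt;
}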
@@ -20,6 +20,7 @@
#include "cc_buffer_mgr.h"
#include "cc_debugfs.h"
#include "cc_cipher.h"
#include "cc_hash.h"
#include "cc_ivgen.h"
#include "cc_sram_mgr.h"
#include "cc_pm.h"
@@ -286,8 +287,17 @@ static int init_cc_resources(struct platform_device *plat_dev)
goto post_ivgen_err;
}
/* hash must be allocated before aead since hash exports APIs */
rc = cc_hash_alloc(new_drvdata);
if (rc) {
dev_err(dev, "cc_hash_alloc failed\n");
goto post_cipher_err;
}
return 0;
post_cipher_err:
cc_cipher_free(new_drvdata);
post_ivgen_err:
cc_ivgen_fini(new_drvdata);
post_power_mgr_err:
@@ -318,6 +328,7 @@ static void cleanup_cc_resources(struct platform_device *plat_dev)
struct cc_drvdata *drvdata =
(struct cc_drvdata *)platform_get_drvdata(plat_dev);
cc_hash_free(drvdata);
cc_cipher_free(drvdata);
cc_ivgen_fini(drvdata);
cc_pm_fini(drvdata);
@@ -406,6 +417,8 @@ static int __init ccree_init(void)
{
int ret;
cc_hash_global_init();
ret = cc_debugfs_global_init();
if (ret)
return ret;
......
@@ -113,6 +113,7 @@ struct cc_drvdata {
cc_sram_addr_t mlli_sram_addr;
void *buff_mgr_handle;
void *cipher_handle;
void *hash_handle;
void *request_mgr_handle;
void *ivgen_handle;
void *sram_mgr_handle;
......
This diff is collapsed.
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
/* \file cc_hash.h
* ARM CryptoCell Hash Crypto API
*/
#ifndef __CC_HASH_H__
#define __CC_HASH_H__
#include "cc_buffer_mgr.h"
#define HMAC_IPAD_CONST 0x36363636
#define HMAC_OPAD_CONST 0x5C5C5C5C
#if (CC_DEV_SHA_MAX > 256)
#define HASH_LEN_SIZE 16
#define CC_MAX_HASH_DIGEST_SIZE SHA512_DIGEST_SIZE
#define CC_MAX_HASH_BLCK_SIZE SHA512_BLOCK_SIZE
#else
#define HASH_LEN_SIZE 8
#define CC_MAX_HASH_DIGEST_SIZE SHA256_DIGEST_SIZE
#define CC_MAX_HASH_BLCK_SIZE SHA256_BLOCK_SIZE
#endif
#define XCBC_MAC_K1_OFFSET 0
#define XCBC_MAC_K2_OFFSET 16
#define XCBC_MAC_K3_OFFSET 32
#define CC_EXPORT_MAGIC 0xC2EE1070U
/* this struct was taken from drivers/crypto/nx/nx-aes-xcbc.c and it is used
* for xcbc/cmac statesize
*/
struct aeshash_state {
u8 state[AES_BLOCK_SIZE];
unsigned int count;
u8 buffer[AES_BLOCK_SIZE];
};
/* ahash state */
struct ahash_req_ctx {
u8 buffers[2][CC_MAX_HASH_BLCK_SIZE] ____cacheline_aligned;
u8 digest_result_buff[CC_MAX_HASH_DIGEST_SIZE] ____cacheline_aligned;
u8 digest_buff[CC_MAX_HASH_DIGEST_SIZE] ____cacheline_aligned;
u8 opad_digest_buff[CC_MAX_HASH_DIGEST_SIZE] ____cacheline_aligned;
u8 digest_bytes_len[HASH_LEN_SIZE] ____cacheline_aligned;
struct async_gen_req_ctx gen_ctx ____cacheline_aligned;
enum cc_req_dma_buf_type data_dma_buf_type;
dma_addr_t opad_digest_dma_addr;
dma_addr_t digest_buff_dma_addr;
dma_addr_t digest_bytes_len_dma_addr;
dma_addr_t digest_result_dma_addr;
u32 buf_cnt[2];
u32 buff_index;
u32 xcbc_count; /* count xcbc update operations */
struct scatterlist buff_sg[2];
struct scatterlist *curr_sg;
u32 in_nents;
u32 mlli_nents;
struct mlli_params mlli_params;
};
static inline u32 *cc_hash_buf_cnt(struct ahash_req_ctx *state)
{
return &state->buf_cnt[state->buff_index];
}
static inline u8 *cc_hash_buf(struct ahash_req_ctx *state)
{
return state->buffers[state->buff_index];
}
static inline u32 *cc_next_buf_cnt(struct ahash_req_ctx *state)
{
return &state->buf_cnt[state->buff_index ^ 1];
}
static inline u8 *cc_next_buf(struct ahash_req_ctx *state)
{
return state->buffers[state->buff_index ^ 1];
}
int cc_hash_alloc(struct cc_drvdata *drvdata);
int cc_init_hash_sram(struct cc_drvdata *drvdata);
int cc_hash_free(struct cc_drvdata *drvdata);
/*!
 * Gets the address of the initial digest length in SRAM
 * according to the given hash mode
 *
 * \param drvdata
 * \param mode The Hash mode. Supported modes:
 * MD5/SHA1/SHA224/SHA256/SHA384/SHA512
 *
 * \return u32 The address of the initial digest length in SRAM
*/
cc_sram_addr_t
cc_digest_len_addr(void *drvdata, u32 mode);
/*!
* Gets the address of the initial digest in SRAM
* according to the given hash mode
*
* \param drvdata
* \param mode The Hash mode. Supported modes:
* MD5/SHA1/SHA224/SHA256/SHA384/SHA512
*
* \return u32 The address of the initial digest in SRAM
*/
cc_sram_addr_t cc_larval_digest_addr(void *drvdata, u32 mode);
void cc_hash_global_init(void);
#endif /*__CC_HASH_H__*/
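The buffers[2]/buf_cnt[2] pair and buff_index in ahash_req_ctx implement a ping-pong staging scheme: the "current" buffer holds data carried over from earlier updates, the "next" buffer receives the sub-block residue of the current update, and flipping buff_index afterwards promotes "next" to "current" for the following call. The sketch below shows the same pattern in isolation; the names are hypothetical and not part of this commit.

/*
 * Illustrative only: the ping-pong buffer indexing behind
 * cc_hash_buf()/cc_next_buf() and the buff_index flips in the buffer
 * manager. Names are hypothetical.
 */
struct cc_pingpong {
	u8 buf[2][CC_MAX_HASH_BLCK_SIZE];
	u32 cnt[2];
	u32 idx;	/* selects the "current" buffer */
};

static inline u8 *pp_curr_buf(struct cc_pingpong *p)
{
	return p->buf[p->idx];
}

static inline u8 *pp_next_buf(struct cc_pingpong *p)
{
	return p->buf[p->idx ^ 1];
}

/* once the residue sits in the "next" buffer, promote it for the next call */
static inline void pp_swap(struct cc_pingpong *p)
{
	p->idx ^= 1;
}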
@@ -9,6 +9,7 @@
#include "cc_request_mgr.h"
#include "cc_sram_mgr.h"
#include "cc_ivgen.h"
#include "cc_hash.h"
#include "cc_pm.h"
#define POWER_DOWN_ENABLE 0x01
@@ -61,6 +62,9 @@ int cc_pm_resume(struct device *dev)
return rc;
}
/* must run after the HW queue is resumed, as it uses the HW queue */
cc_init_hash_sram(drvdata);
cc_init_iv_sram(drvdata);
return 0;
}
......