Commit 8b64e512 authored by Gilad Ben-Yossef, committed by Greg Kroah-Hartman

staging: ccree: refactor HW command FIFO access

The programming of the HW command FIFO in ccree was done via
a set of macros which suffered from a few problems:
- Using macros rather than inline functions loses parameter type
  checking and risks side effects from multiple evaluation of the
  macro parameters.
- The macros were implemented via hand-rolled versions of bitfield
  operations.

This patch refactors the HW command queue access into a set of
inline functions built on the generic kernel bitfield access
infrastructure, resolving the above issues and opening the way
to dropping the hand-rolled bitfield macros once their remaining
users are removed in later patches in the series.
Signed-off-by: Gilad Ben-Yossef <gilad@benyossef.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 6562e7db
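
To make the two problems called out above concrete, here is a minimal, hypothetical sketch of the before/after pattern. The struct body, the WORD4_DATA_FLOW_MODE mask and the macro body are simplified stand-ins invented for illustration, not the driver's actual definitions; GENMASK() and FIELD_PREP() are the generic kernel helpers from <linux/bitops.h> and <linux/bitfield.h> that the refactor builds on.

#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/bitfield.h>

struct cc_hw_desc {			/* minimal stand-in descriptor */
	u32 word[6];
};

/* Old style: a hand-rolled macro.  There is no parameter type
 * checking, and side effects in the arguments are repeated as often
 * as the macro body expands them.
 */
#define HW_DESC_SET_FLOW_MODE(pdesc, mode) \
	((pdesc)->word[4] |= ((mode) & 0x3fUL) << 0)

/* New style: a static inline built on the generic bitfield helpers.
 * Arguments are type checked and evaluated exactly once.
 */
#define WORD4_DATA_FLOW_MODE	GENMASK(5, 0)	/* assumed field layout */

static inline void set_flow_mode(struct cc_hw_desc *pdesc, u32 mode)
{
	pdesc->word[4] |= FIELD_PREP(WORD4_DATA_FLOW_MODE, mode);
}
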
@@ -21,8 +21,12 @@
 #ifndef _CC_BITOPS_H_
 #define _CC_BITOPS_H_
-#define BITMASK(mask_size) (((mask_size) < 32) ? \
-	((1UL << (mask_size)) - 1) : 0xFFFFFFFFUL)
+#include <linux/bitops.h>
+#include <linux/bitfield.h>
+#define BITMASK(mask_size) (((mask_size) < 32) ? \
+	((1UL << (mask_size)) - 1) : 0xFFFFFFFFUL)
 #define BITMASK_AT(mask_size, mask_offset) (BITMASK(mask_size) << (mask_offset))
 #define BITFIELD_GET(word, bit_offset, bit_size) \
...
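
As a rough equivalence, the hand-rolled helpers above map onto the generic GENMASK()/FIELD_GET() helpers provided by the two headers this hunk adds, which is what lets later patches in the series drop the local macros once their users are gone. A hedged sketch with a hypothetical field (name and bit layout invented for illustration):

#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/bitfield.h>

/* Hypothetical 4-bit field at bits 11:8 of a queue register;
 * BITMASK_AT(4, 8) builds the same mask as GENMASK(11, 8).
 */
#define QUEUE_WATERMARK_MASK	GENMASK(11, 8)

static inline u32 queue_watermark(u32 reg_val)
{
	/* FIELD_GET() masks and shifts in one step, standing in for a
	 * hand-rolled BITFIELD_GET(reg_val, 8, 4)-style extraction.
	 */
	return FIELD_GET(QUEUE_WATERMARK_MASK, reg_val);
}
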
[3 collapsed file diffs not shown]
@@ -41,16 +41,16 @@
 #include "dx_reg_base_host.h"
 #include "dx_host.h"
 #define DX_CC_HOST_VIRT /* must be defined before including dx_cc_regs.h */
-#include "cc_hw_queue_defs.h"
 #include "cc_regs.h"
 #include "dx_reg_common.h"
 #include "cc_hal.h"
-#include "ssi_sram_mgr.h"
 #define CC_SUPPORT_SHA DX_DEV_SHA_MAX
 #include "cc_crypto_ctx.h"
 #include "ssi_sysfs.h"
 #include "hash_defs.h"
 #include "ssi_fips_local.h"
+#include "cc_hw_queue_defs.h"
+#include "ssi_sram_mgr.h"
 #define DRV_MODULE_VERSION "3.0"
...
[2 collapsed file diffs not shown]
@@ -69,37 +69,37 @@ static int ssi_ivgen_generate_pool(
 		return -EINVAL;
 	}
 	/* Setup key */
-	HW_DESC_INIT(&iv_seq[idx]);
-	HW_DESC_SET_DIN_SRAM(&iv_seq[idx], ivgen_ctx->ctr_key, AES_KEYSIZE_128);
-	HW_DESC_SET_SETUP_MODE(&iv_seq[idx], SETUP_LOAD_KEY0);
-	HW_DESC_SET_CIPHER_CONFIG0(&iv_seq[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
-	HW_DESC_SET_FLOW_MODE(&iv_seq[idx], S_DIN_to_AES);
-	HW_DESC_SET_KEY_SIZE_AES(&iv_seq[idx], CC_AES_128_BIT_KEY_SIZE);
-	HW_DESC_SET_CIPHER_MODE(&iv_seq[idx], DRV_CIPHER_CTR);
+	hw_desc_init(&iv_seq[idx]);
+	set_din_sram(&iv_seq[idx], ivgen_ctx->ctr_key, AES_KEYSIZE_128);
+	set_setup_mode(&iv_seq[idx], SETUP_LOAD_KEY0);
+	set_cipher_config0(&iv_seq[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
+	set_flow_mode(&iv_seq[idx], S_DIN_to_AES);
+	set_key_size_aes(&iv_seq[idx], CC_AES_128_BIT_KEY_SIZE);
+	set_cipher_mode(&iv_seq[idx], DRV_CIPHER_CTR);
 	idx++;
 	/* Setup cipher state */
-	HW_DESC_INIT(&iv_seq[idx]);
-	HW_DESC_SET_DIN_SRAM(&iv_seq[idx], ivgen_ctx->ctr_iv, CC_AES_IV_SIZE);
-	HW_DESC_SET_CIPHER_CONFIG0(&iv_seq[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
-	HW_DESC_SET_FLOW_MODE(&iv_seq[idx], S_DIN_to_AES);
-	HW_DESC_SET_SETUP_MODE(&iv_seq[idx], SETUP_LOAD_STATE1);
-	HW_DESC_SET_KEY_SIZE_AES(&iv_seq[idx], CC_AES_128_BIT_KEY_SIZE);
-	HW_DESC_SET_CIPHER_MODE(&iv_seq[idx], DRV_CIPHER_CTR);
+	hw_desc_init(&iv_seq[idx]);
+	set_din_sram(&iv_seq[idx], ivgen_ctx->ctr_iv, CC_AES_IV_SIZE);
+	set_cipher_config0(&iv_seq[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
+	set_flow_mode(&iv_seq[idx], S_DIN_to_AES);
+	set_setup_mode(&iv_seq[idx], SETUP_LOAD_STATE1);
+	set_key_size_aes(&iv_seq[idx], CC_AES_128_BIT_KEY_SIZE);
+	set_cipher_mode(&iv_seq[idx], DRV_CIPHER_CTR);
 	idx++;
 	/* Perform dummy encrypt to skip first block */
-	HW_DESC_INIT(&iv_seq[idx]);
-	HW_DESC_SET_DIN_CONST(&iv_seq[idx], 0, CC_AES_IV_SIZE);
-	HW_DESC_SET_DOUT_SRAM(&iv_seq[idx], ivgen_ctx->pool, CC_AES_IV_SIZE);
-	HW_DESC_SET_FLOW_MODE(&iv_seq[idx], DIN_AES_DOUT);
+	hw_desc_init(&iv_seq[idx]);
+	set_din_const(&iv_seq[idx], 0, CC_AES_IV_SIZE);
+	set_dout_sram(&iv_seq[idx], ivgen_ctx->pool, CC_AES_IV_SIZE);
+	set_flow_mode(&iv_seq[idx], DIN_AES_DOUT);
 	idx++;
 	/* Generate IV pool */
-	HW_DESC_INIT(&iv_seq[idx]);
-	HW_DESC_SET_DIN_CONST(&iv_seq[idx], 0, SSI_IVPOOL_SIZE);
-	HW_DESC_SET_DOUT_SRAM(&iv_seq[idx], ivgen_ctx->pool, SSI_IVPOOL_SIZE);
-	HW_DESC_SET_FLOW_MODE(&iv_seq[idx], DIN_AES_DOUT);
+	hw_desc_init(&iv_seq[idx]);
+	set_din_const(&iv_seq[idx], 0, SSI_IVPOOL_SIZE);
+	set_dout_sram(&iv_seq[idx], ivgen_ctx->pool, SSI_IVPOOL_SIZE);
+	set_flow_mode(&iv_seq[idx], DIN_AES_DOUT);
 	idx++;
 	*iv_seq_len = idx; /* Update sequence length */
@@ -133,13 +133,12 @@ int ssi_ivgen_init_sram_pool(struct ssi_drvdata *drvdata)
 	ivgen_ctx->ctr_iv = ivgen_ctx->pool + AES_KEYSIZE_128;
 	/* Copy initial enc. key and IV to SRAM at a single descriptor */
-	HW_DESC_INIT(&iv_seq[iv_seq_len]);
-	HW_DESC_SET_DIN_TYPE(&iv_seq[iv_seq_len], DMA_DLLI,
-			     ivgen_ctx->pool_meta_dma, SSI_IVPOOL_META_SIZE,
-			     NS_BIT);
-	HW_DESC_SET_DOUT_SRAM(&iv_seq[iv_seq_len], ivgen_ctx->pool,
-			      SSI_IVPOOL_META_SIZE);
-	HW_DESC_SET_FLOW_MODE(&iv_seq[iv_seq_len], BYPASS);
+	hw_desc_init(&iv_seq[iv_seq_len]);
+	set_din_type(&iv_seq[iv_seq_len], DMA_DLLI, ivgen_ctx->pool_meta_dma,
+		     SSI_IVPOOL_META_SIZE, NS_BIT);
+	set_dout_sram(&iv_seq[iv_seq_len], ivgen_ctx->pool,
+		      SSI_IVPOOL_META_SIZE);
+	set_flow_mode(&iv_seq[iv_seq_len], BYPASS);
 	iv_seq_len++;
 	/* Generate initial pool */
@@ -268,22 +267,22 @@ int ssi_ivgen_getiv(
 	for (t = 0; t < iv_out_dma_len; t++) {
 		/* Acquire IV from pool */
-		HW_DESC_INIT(&iv_seq[idx]);
-		HW_DESC_SET_DIN_SRAM(&iv_seq[idx],
-				     ivgen_ctx->pool + ivgen_ctx->next_iv_ofs,
-				     iv_out_size);
-		HW_DESC_SET_DOUT_DLLI(&iv_seq[idx], iv_out_dma[t],
-				      iv_out_size, NS_BIT, 0);
-		HW_DESC_SET_FLOW_MODE(&iv_seq[idx], BYPASS);
+		hw_desc_init(&iv_seq[idx]);
+		set_din_sram(&iv_seq[idx], (ivgen_ctx->pool +
+					    ivgen_ctx->next_iv_ofs),
+			     iv_out_size);
+		set_dout_dlli(&iv_seq[idx], iv_out_dma[t], iv_out_size,
+			      NS_BIT, 0);
+		set_flow_mode(&iv_seq[idx], BYPASS);
 		idx++;
 	}
 	/* Bypass operation is proceeded by crypto sequence, hence must
	 * assure bypass-write-transaction by a memory barrier
	 */
-	HW_DESC_INIT(&iv_seq[idx]);
-	HW_DESC_SET_DIN_NO_DMA(&iv_seq[idx], 0, 0xfffff0);
-	HW_DESC_SET_DOUT_NO_DMA(&iv_seq[idx], 0, 0, 1);
+	hw_desc_init(&iv_seq[idx]);
+	set_din_no_dma(&iv_seq[idx], 0, 0xfffff0);
+	set_dout_no_dma(&iv_seq[idx], 0, 0, 1);
 	idx++;
 	*iv_seq_len = idx; /* update seq length */
...
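
The converted ssi_ivgen.c code above illustrates the shape of the new API: each HW_DESC_* macro becomes a lower-case setter that operates on a struct cc_hw_desc. As a usage sketch of that calling pattern (the wrapper function, its name and its parameter types are invented for illustration; the helpers, flags and header are the ones the diff itself uses):

#include <linux/types.h>
#include "cc_hw_queue_defs.h"	/* driver-internal: descriptor struct and inline setters */

/* Build a single BYPASS descriptor that DMA-copies 'size' bytes from a
 * host buffer into CC-internal SRAM, mirroring the converted
 * ssi_ivgen_init_sram_pool() sequence.
 */
static void build_bypass_copy_desc(struct cc_hw_desc *desc,
				   dma_addr_t src_dma, u32 dst_sram_addr,
				   u32 size)
{
	hw_desc_init(desc);			/* start from a clean descriptor */
	set_din_type(desc, DMA_DLLI, src_dma, size, NS_BIT);
	set_dout_sram(desc, dst_sram_addr, size);
	set_flow_mode(desc, BYPASS);		/* plain copy, no crypto engine */
}
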
@@ -47,7 +47,7 @@
  */
 #define INIT_CC_MONITOR_DESC(desc_p) \
 do { \
-	HW_DESC_INIT(desc_p); \
+	hw_desc_init(desc_p); \
 	HW_DESC_SET_DIN_MONITOR_CNTR(desc_p); \
 } while (0)
@@ -73,9 +73,9 @@ do { \
 do { \
 	if ((is_monitored) == true) { \
 		struct cc_hw_desc barrier_desc; \
-		HW_DESC_INIT(&barrier_desc); \
-		HW_DESC_SET_DIN_NO_DMA(&barrier_desc, 0, 0xfffff0); \
-		HW_DESC_SET_DOUT_NO_DMA(&barrier_desc, 0, 0, 1); \
+		hw_desc_init(&barrier_desc); \
+		set_din_no_dma(&barrier_desc, 0, 0xfffff0); \
+		set_dout_no_dma(&barrier_desc, 0, 0, 1); \
 		enqueue_seq((cc_base_addr), &barrier_desc, 1); \
 		enqueue_seq((cc_base_addr), (desc_p), 1); \
 	} \
@@ -224,13 +224,12 @@ int request_mgr_init(struct ssi_drvdata *drvdata)
 				  sizeof(u32));
 	/* Init. "dummy" completion descriptor */
-	HW_DESC_INIT(&req_mgr_h->compl_desc);
-	HW_DESC_SET_DIN_CONST(&req_mgr_h->compl_desc, 0, sizeof(u32));
-	HW_DESC_SET_DOUT_DLLI(&req_mgr_h->compl_desc,
-			      req_mgr_h->dummy_comp_buff_dma,
-			      sizeof(u32), NS_BIT, 1);
-	HW_DESC_SET_FLOW_MODE(&req_mgr_h->compl_desc, BYPASS);
-	HW_DESC_SET_QUEUE_LAST_IND(&req_mgr_h->compl_desc);
+	hw_desc_init(&req_mgr_h->compl_desc);
+	set_din_const(&req_mgr_h->compl_desc, 0, sizeof(u32));
+	set_dout_dlli(&req_mgr_h->compl_desc, req_mgr_h->dummy_comp_buff_dma,
+		      sizeof(u32), NS_BIT, 1);
+	set_flow_mode(&req_mgr_h->compl_desc, BYPASS);
+	set_queue_last_ind(&req_mgr_h->compl_desc);
 #ifdef CC_CYCLE_COUNT
 	/* For CC-HW cycle performance trace */
@@ -519,7 +518,7 @@ int send_request_init(
 	if (unlikely(rc != 0 )) {
 		return rc;
 	}
-	HW_DESC_SET_QUEUE_LAST_IND(&desc[len-1]);
+	set_queue_last_ind(&desc[(len - 1)]);
 	enqueue_seq(cc_base, desc, len);
...
@@ -127,10 +127,10 @@ void ssi_sram_mgr_const2sram_desc(
 	unsigned int idx = *seq_len;
 	for (i = 0; i < nelement; i++, idx++) {
-		HW_DESC_INIT(&seq[idx]);
-		HW_DESC_SET_DIN_CONST(&seq[idx], src[i], sizeof(u32));
-		HW_DESC_SET_DOUT_SRAM(&seq[idx], dst + (i * sizeof(u32)), sizeof(u32));
-		HW_DESC_SET_FLOW_MODE(&seq[idx], BYPASS);
+		hw_desc_init(&seq[idx]);
+		set_din_const(&seq[idx], src[i], sizeof(u32));
+		set_dout_sram(&seq[idx], dst + (i * sizeof(u32)), sizeof(u32));
+		set_flow_mode(&seq[idx], BYPASS);
 	}
 	*seq_len = idx;
...