Commit 5b14b2b3 authored by Giovanni Cabiddu, committed by Herbert Xu

crypto: qat - enable deflate for QAT GEN4

Enable deflate for QAT GEN4 devices.

This adds
  (1) logic to create, at probe time, the configuration entries for the
  compression instances of QAT GEN4 devices;
  (2) an implementation of the QAT GEN4-specific compression operations,
  required since GEN2 and GEN4 build the compression request template
  differently; and
  (3) updates to the compression-related firmware API for GEN4.

The implementation configures the device to produce data compressed
dynamically, optimized for throughput over compression ratio.
Signed-off-by: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
Reviewed-by: Wojciech Ziemba <wojciech.ziemba@intel.com>
Reviewed-by: Adam Guerin <adam.guerin@intel.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent 1198ae56
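
Before the diff, a note on the dispatch pattern it relies on: each device generation fills a struct adf_hw_device_data with its own callbacks, so the common probe path never references GEN4 directly. A minimal sketch, with the structures trimmed to the fields this commit touches (the full definitions live in adf_accel_devices.h and are not part of this diff):

struct adf_accel_dev;

/* Trimmed, illustrative versions of the real structures. */
struct adf_dc_ops {
	/* Builds the firmware request template(s) for compression. */
	void (*build_deflate_ctx)(void *ctx);
};

struct adf_hw_device_data {
	/* Probe-time hook; GEN4 now points this at adf_gen4_dev_config(). */
	int (*dev_config)(struct adf_accel_dev *accel_dev);
	/* Filled in by adf_gen4_init_dc_ops() in the first hunk below. */
	struct adf_dc_ops dc_ops;
	/* ...many other callbacks elided... */
};
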
@@ -4,6 +4,7 @@
#include <adf_accel_devices.h>
#include <adf_cfg.h>
#include <adf_common_drv.h>
#include <adf_gen4_dc.h>
#include <adf_gen4_hw_data.h>
#include <adf_gen4_pfvf.h>
#include <adf_gen4_pm.h>
@@ -357,10 +358,11 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data)
hw_data->ring_pair_reset = adf_gen4_ring_pair_reset;
hw_data->enable_pm = adf_gen4_enable_pm;
hw_data->handle_pm_interrupt = adf_gen4_handle_pm_interrupt;
-hw_data->dev_config = adf_crypto_dev_config;
+hw_data->dev_config = adf_gen4_dev_config;
adf_gen4_init_hw_csr_ops(&hw_data->csr_ops);
adf_gen4_init_pf_pfvf_ops(&hw_data->pfvf_ops);
adf_gen4_init_dc_ops(&hw_data->dc_ops);
}
void adf_clean_hw_data_4xxx(struct adf_hw_device_data *hw_data)
@@ -70,6 +70,6 @@ enum icp_qat_4xxx_slice_mask {
void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data);
void adf_clean_hw_data_4xxx(struct adf_hw_device_data *hw_data);
-int adf_crypto_dev_config(struct adf_accel_dev *accel_dev);
+int adf_gen4_dev_config(struct adf_accel_dev *accel_dev);
#endif
@@ -9,6 +9,7 @@
#include <adf_common_drv.h>
#include "adf_4xxx_hw_data.h"
#include "qat_compression.h"
#include "qat_crypto.h"
#include "adf_transport_access_macros.h"
@@ -19,6 +20,16 @@ static const struct pci_device_id adf_pci_tbl[] = {
};
MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
enum configs {
DEV_CFG_CY = 0,
DEV_CFG_DC,
};
static const char * const services_operations[] = {
ADF_CFG_CY,
ADF_CFG_DC,
};
static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
{
if (accel_dev->hw_device) {
@@ -53,7 +64,7 @@ static int adf_cfg_dev_init(struct adf_accel_dev *accel_dev)
return 0;
}
-int adf_crypto_dev_config(struct adf_accel_dev *accel_dev)
+static int adf_crypto_dev_config(struct adf_accel_dev *accel_dev)
{
char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
int banks = GET_MAX_BANKS(accel_dev);
@@ -68,14 +79,6 @@ int adf_crypto_dev_config(struct adf_accel_dev *accel_dev)
else
instances = 0;
-ret = adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC);
-if (ret)
-goto err;
-ret = adf_cfg_section_add(accel_dev, "Accelerator0");
-if (ret)
-goto err;
for (i = 0; i < instances; i++) {
val = i;
bank = i * 2;
@@ -161,10 +164,122 @@ int adf_crypto_dev_config(struct adf_accel_dev *accel_dev)
if (ret)
goto err;
-set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
return 0;
err:
-dev_err(&GET_DEV(accel_dev), "Failed to start QAT accel dev\n");
+dev_err(&GET_DEV(accel_dev), "Failed to add configuration for crypto\n");
return ret;
}
static int adf_comp_dev_config(struct adf_accel_dev *accel_dev)
{
char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
int banks = GET_MAX_BANKS(accel_dev);
int cpus = num_online_cpus();
unsigned long val;
int instances;
int ret;
int i;
if (adf_hw_dev_has_compression(accel_dev))
instances = min(cpus, banks);
else
instances = 0;
for (i = 0; i < instances; i++) {
val = i;
snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_BANK_NUM, i);
ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
key, &val, ADF_DEC);
if (ret)
goto err;
val = 512;
snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_SIZE, i);
ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
key, &val, ADF_DEC);
if (ret)
goto err;
val = 0;
snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_TX, i);
ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
key, &val, ADF_DEC);
if (ret)
goto err;
val = 1;
snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_RX, i);
ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
key, &val, ADF_DEC);
if (ret)
goto err;
val = ADF_COALESCING_DEF_TIME;
snprintf(key, sizeof(key), ADF_ETRMGR_COALESCE_TIMER_FORMAT, i);
ret = adf_cfg_add_key_value_param(accel_dev, "Accelerator0",
key, &val, ADF_DEC);
if (ret)
goto err;
}
val = i;
ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_DC,
&val, ADF_DEC);
if (ret)
goto err;
val = 0;
ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_CY,
&val, ADF_DEC);
if (ret)
goto err;
return 0;
err:
dev_err(&GET_DEV(accel_dev), "Failed to add configuration for compression\n");
return ret;
}
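
The function above writes per-instance ring parameters into the [KERNEL] section and then records the totals: ADF_NUM_DC is set to the number of configured compression instances and ADF_NUM_CY to zero, since a device in this mode does no crypto. A hedged sketch of reading that count back with the same adf_cfg_get_param_value() helper used later in this file (the wrapper function itself is hypothetical):

/* Hypothetical helper: read back the instance count recorded above. */
static int example_num_dc_instances(struct adf_accel_dev *accel_dev)
{
	char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
	unsigned long num;

	/* Values are stored as decimal strings (ADF_DEC above). */
	if (adf_cfg_get_param_value(accel_dev, ADF_KERNEL_SEC, ADF_NUM_DC, val))
		return 0;
	if (kstrtoul(val, 10, &num))
		return 0;
	return num;
}
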
int adf_gen4_dev_config(struct adf_accel_dev *accel_dev)
{
char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0};
int ret;
ret = adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC);
if (ret)
goto err;
ret = adf_cfg_section_add(accel_dev, "Accelerator0");
if (ret)
goto err;
ret = adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC,
ADF_SERVICES_ENABLED, services);
if (ret)
goto err;
ret = sysfs_match_string(services_operations, services);
if (ret < 0)
goto err;
switch (ret) {
case DEV_CFG_CY:
ret = adf_crypto_dev_config(accel_dev);
break;
case DEV_CFG_DC:
ret = adf_comp_dev_config(accel_dev);
break;
}
if (ret)
goto err;
set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
return ret;
err:
dev_err(&GET_DEV(accel_dev), "Failed to configure QAT driver\n");
return ret;
}
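
Usage note: adf_cfg_get_param_value() reads the ServicesEnabled key from the [GENERAL] section, sysfs_match_string() maps it to an index into services_operations[], and the switch routes to the crypto or compression path; any unrecognized value fails the probe. Assuming ADF_CFG_CY and ADF_CFG_DC expand to "sym;asym" and "dc" (their definitions are outside this diff), a compression-only device would carry an entry along these lines:

[GENERAL]
ServicesEnabled = dc
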
@@ -300,7 +415,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret)
goto out_err_disable_aer;
-ret = adf_crypto_dev_config(accel_dev);
+ret = hw_data->dev_config(accel_dev);
if (ret)
goto out_err_disable_aer;
@@ -16,6 +16,7 @@ intel_qat-objs := adf_cfg.o \
adf_gen4_hw_data.o \
adf_gen4_pm.o \
adf_gen2_dc.o \
adf_gen4_dc.o \
qat_crypto.o \
qat_compression.o \
qat_comp_algs.o \
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2022 Intel Corporation */
#include "adf_accel_devices.h"
#include "icp_qat_fw_comp.h"
#include "icp_qat_hw_20_comp.h"
#include "adf_gen4_dc.h"
static void qat_comp_build_deflate(void *ctx)
{
struct icp_qat_fw_comp_req *req_tmpl =
(struct icp_qat_fw_comp_req *)ctx;
struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
struct icp_qat_fw_comp_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
struct icp_qat_fw_comp_req_params *req_pars = &req_tmpl->comp_pars;
struct icp_qat_hw_comp_20_config_csr_upper hw_comp_upper_csr = {0};
struct icp_qat_hw_comp_20_config_csr_lower hw_comp_lower_csr = {0};
struct icp_qat_hw_decomp_20_config_csr_lower hw_decomp_lower_csr = {0};
u32 upper_val;
u32 lower_val;
memset(req_tmpl, 0, sizeof(*req_tmpl));
header->hdr_flags =
ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_COMP;
header->service_cmd_id = ICP_QAT_FW_COMP_CMD_DYNAMIC;
header->comn_req_flags =
ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_16BYTE_DATA,
QAT_COMN_PTR_TYPE_SGL);
header->serv_specif_flags =
ICP_QAT_FW_COMP_FLAGS_BUILD(ICP_QAT_FW_COMP_STATELESS_SESSION,
ICP_QAT_FW_COMP_AUTO_SELECT_BEST,
ICP_QAT_FW_COMP_NOT_ENH_AUTO_SELECT_BEST,
ICP_QAT_FW_COMP_NOT_DISABLE_TYPE0_ENH_AUTO_SELECT_BEST,
ICP_QAT_FW_COMP_ENABLE_SECURE_RAM_USED_AS_INTMD_BUF);
hw_comp_lower_csr.skip_ctrl = ICP_QAT_HW_COMP_20_BYTE_SKIP_3BYTE_LITERAL;
hw_comp_lower_csr.algo = ICP_QAT_HW_COMP_20_HW_COMP_FORMAT_ILZ77;
hw_comp_lower_csr.lllbd = ICP_QAT_HW_COMP_20_LLLBD_CTRL_LLLBD_ENABLED;
hw_comp_lower_csr.sd = ICP_QAT_HW_COMP_20_SEARCH_DEPTH_LEVEL_1;
hw_comp_lower_csr.hash_update = ICP_QAT_HW_COMP_20_SKIP_HASH_UPDATE_DONT_ALLOW;
hw_comp_lower_csr.edmm = ICP_QAT_HW_COMP_20_EXTENDED_DELAY_MATCH_MODE_EDMM_ENABLED;
hw_comp_upper_csr.nice = ICP_QAT_HW_COMP_20_CONFIG_CSR_NICE_PARAM_DEFAULT_VAL;
hw_comp_upper_csr.lazy = ICP_QAT_HW_COMP_20_CONFIG_CSR_LAZY_PARAM_DEFAULT_VAL;
upper_val = ICP_QAT_FW_COMP_20_BUILD_CONFIG_UPPER(hw_comp_upper_csr);
lower_val = ICP_QAT_FW_COMP_20_BUILD_CONFIG_LOWER(hw_comp_lower_csr);
cd_pars->u.sl.comp_slice_cfg_word[0] = lower_val;
cd_pars->u.sl.comp_slice_cfg_word[1] = upper_val;
req_pars->crc.legacy.initial_adler = COMP_CPR_INITIAL_ADLER;
req_pars->crc.legacy.initial_crc32 = COMP_CPR_INITIAL_CRC;
req_pars->req_par_flags =
ICP_QAT_FW_COMP_REQ_PARAM_FLAGS_BUILD(ICP_QAT_FW_COMP_SOP,
ICP_QAT_FW_COMP_EOP,
ICP_QAT_FW_COMP_BFINAL,
ICP_QAT_FW_COMP_CNV,
ICP_QAT_FW_COMP_CNV_RECOVERY,
ICP_QAT_FW_COMP_NO_CNV_DFX,
ICP_QAT_FW_COMP_CRC_MODE_LEGACY,
ICP_QAT_FW_COMP_NO_XXHASH_ACC,
ICP_QAT_FW_COMP_CNV_ERROR_NONE,
ICP_QAT_FW_COMP_NO_APPEND_CRC,
ICP_QAT_FW_COMP_NO_DROP_DATA);
/* Fill second half of the template for decompression */
memcpy(req_tmpl + 1, req_tmpl, sizeof(*req_tmpl));
req_tmpl++;
header = &req_tmpl->comn_hdr;
header->service_cmd_id = ICP_QAT_FW_COMP_CMD_DECOMPRESS;
cd_pars = &req_tmpl->cd_pars;
hw_decomp_lower_csr.algo = ICP_QAT_HW_DECOMP_20_HW_DECOMP_FORMAT_DEFLATE;
lower_val = ICP_QAT_FW_DECOMP_20_BUILD_CONFIG_LOWER(hw_decomp_lower_csr);
cd_pars->u.sl.comp_slice_cfg_word[0] = lower_val;
cd_pars->u.sl.comp_slice_cfg_word[1] = 0;
}
void adf_gen4_init_dc_ops(struct adf_dc_ops *dc_ops)
{
dc_ops->build_deflate_ctx = qat_comp_build_deflate;
}
EXPORT_SYMBOL_GPL(adf_gen4_init_dc_ops);
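
qat_comp_build_deflate() fills two consecutive request templates: compression first, then a copy patched into a decompression template (new command ID, DEFLATE decompressor slice configuration words). A hedged sketch of a caller, assuming only the two-template buffer convention visible above (the function name is illustrative):

/* Illustrative caller: the ctx buffer must hold two templates,
 * compression at index 0, decompression at index 1. */
static void example_build_templates(const struct adf_dc_ops *dc_ops)
{
	struct icp_qat_fw_comp_req tmpl[2];

	/* Zeroes and fills tmpl[0], then copies and patches tmpl[1]. */
	dc_ops->build_deflate_ctx(tmpl);
	/* tmpl[0].comn_hdr.service_cmd_id == ICP_QAT_FW_COMP_CMD_DYNAMIC */
	/* tmpl[1].comn_hdr.service_cmd_id == ICP_QAT_FW_COMP_CMD_DECOMPRESS */
}
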
/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright(c) 2022 Intel Corporation */
#ifndef ADF_GEN4_DC_H
#define ADF_GEN4_DC_H
#include "adf_accel_devices.h"
void adf_gen4_init_dc_ops(struct adf_dc_ops *dc_ops);
#endif /* ADF_GEN4_DC_H */
/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright(c) 2022 Intel Corporation */
#ifndef _ICP_QAT_HW_20_COMP_H_
#define _ICP_QAT_HW_20_COMP_H_
#include "icp_qat_hw_20_comp_defs.h"
#include "icp_qat_fw.h"
struct icp_qat_hw_comp_20_config_csr_lower {
enum icp_qat_hw_comp_20_extended_delay_match_mode edmm;
enum icp_qat_hw_comp_20_hw_comp_format algo;
enum icp_qat_hw_comp_20_search_depth sd;
enum icp_qat_hw_comp_20_hbs_control hbs;
enum icp_qat_hw_comp_20_abd abd;
enum icp_qat_hw_comp_20_lllbd_ctrl lllbd;
enum icp_qat_hw_comp_20_min_match_control mmctrl;
enum icp_qat_hw_comp_20_skip_hash_collision hash_col;
enum icp_qat_hw_comp_20_skip_hash_update hash_update;
enum icp_qat_hw_comp_20_byte_skip skip_ctrl;
};
static inline __u32
ICP_QAT_FW_COMP_20_BUILD_CONFIG_LOWER(struct icp_qat_hw_comp_20_config_csr_lower csr)
{
u32 val32 = 0;
QAT_FIELD_SET(val32, csr.algo,
ICP_QAT_HW_COMP_20_CONFIG_CSR_HW_COMP_FORMAT_BITPOS,
ICP_QAT_HW_COMP_20_CONFIG_CSR_HW_COMP_FORMAT_MASK);
QAT_FIELD_SET(val32, csr.sd,
ICP_QAT_HW_COMP_20_CONFIG_CSR_SEARCH_DEPTH_BITPOS,
ICP_QAT_HW_COMP_20_CONFIG_CSR_SEARCH_DEPTH_MASK);
QAT_FIELD_SET(val32, csr.edmm,
ICP_QAT_HW_COMP_20_CONFIG_CSR_EXTENDED_DELAY_MATCH_MODE_BITPOS,
ICP_QAT_HW_COMP_20_CONFIG_CSR_EXTENDED_DELAY_MATCH_MODE_MASK);
QAT_FIELD_SET(val32, csr.hbs,
ICP_QAT_HW_COMP_20_CONFIG_CSR_HBS_CONTROL_BITPOS,
ICP_QAT_HW_COMP_20_CONFIG_CSR_HBS_CONTROL_MASK);
QAT_FIELD_SET(val32, csr.lllbd,
ICP_QAT_HW_COMP_20_CONFIG_CSR_LLLBD_CTRL_BITPOS,
ICP_QAT_HW_COMP_20_CONFIG_CSR_LLLBD_CTRL_MASK);
QAT_FIELD_SET(val32, csr.mmctrl,
ICP_QAT_HW_COMP_20_CONFIG_CSR_MIN_MATCH_CONTROL_BITPOS,
ICP_QAT_HW_COMP_20_CONFIG_CSR_MIN_MATCH_CONTROL_MASK);
QAT_FIELD_SET(val32, csr.hash_col,
ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_COLLISION_BITPOS,
ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_COLLISION_MASK);
QAT_FIELD_SET(val32, csr.hash_update,
ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_UPDATE_BITPOS,
ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_UPDATE_MASK);
QAT_FIELD_SET(val32, csr.skip_ctrl,
ICP_QAT_HW_COMP_20_CONFIG_CSR_BYTE_SKIP_BITPOS,
ICP_QAT_HW_COMP_20_CONFIG_CSR_BYTE_SKIP_MASK);
QAT_FIELD_SET(val32, csr.abd, ICP_QAT_HW_COMP_20_CONFIG_CSR_ABD_BITPOS,
ICP_QAT_HW_COMP_20_CONFIG_CSR_ABD_MASK);
return __builtin_bswap32(val32);
}
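
QAT_FIELD_SET() comes from icp_qat_fw.h (not shown in this diff): it clears a field's bits in the target word and ORs in the new value at its bit position; the final __builtin_bswap32() byte-swaps the assembled CSR, the slice configuration word apparently being consumed in the opposite byte order (an inference from the swap, not stated in this patch). An illustrative equivalent of the packing step:

/* Illustrative equivalent of QAT_FIELD_SET(): clear the field, then
 * OR in the new value shifted into position. */
#define EXAMPLE_FIELD_SET(flags, val, bitpos, mask) \
	((flags) = ((flags) & ~((u32)(mask) << (bitpos))) | \
		   (((u32)(val) & (mask)) << (bitpos)))
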
struct icp_qat_hw_comp_20_config_csr_upper {
enum icp_qat_hw_comp_20_scb_control scb_ctrl;
enum icp_qat_hw_comp_20_rmb_control rmb_ctrl;
enum icp_qat_hw_comp_20_som_control som_ctrl;
enum icp_qat_hw_comp_20_skip_hash_rd_control skip_hash_ctrl;
enum icp_qat_hw_comp_20_scb_unload_control scb_unload_ctrl;
enum icp_qat_hw_comp_20_disable_token_fusion_control disable_token_fusion_ctrl;
enum icp_qat_hw_comp_20_lbms lbms;
enum icp_qat_hw_comp_20_scb_mode_reset_mask scb_mode_reset;
__u16 lazy;
__u16 nice;
};
static inline __u32
ICP_QAT_FW_COMP_20_BUILD_CONFIG_UPPER(struct icp_qat_hw_comp_20_config_csr_upper csr)
{
u32 val32 = 0;
QAT_FIELD_SET(val32, csr.scb_ctrl,
ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_CONTROL_BITPOS,
ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_CONTROL_MASK);
QAT_FIELD_SET(val32, csr.rmb_ctrl,
ICP_QAT_HW_COMP_20_CONFIG_CSR_RMB_CONTROL_BITPOS,
ICP_QAT_HW_COMP_20_CONFIG_CSR_RMB_CONTROL_MASK);
QAT_FIELD_SET(val32, csr.som_ctrl,
ICP_QAT_HW_COMP_20_CONFIG_CSR_SOM_CONTROL_BITPOS,
ICP_QAT_HW_COMP_20_CONFIG_CSR_SOM_CONTROL_MASK);
QAT_FIELD_SET(val32, csr.skip_hash_ctrl,
ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_RD_CONTROL_BITPOS,
ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_RD_CONTROL_MASK);
QAT_FIELD_SET(val32, csr.scb_unload_ctrl,
ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_UNLOAD_CONTROL_BITPOS,
ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_UNLOAD_CONTROL_MASK);
QAT_FIELD_SET(val32, csr.disable_token_fusion_ctrl,
ICP_QAT_HW_COMP_20_CONFIG_CSR_DISABLE_TOKEN_FUSION_CONTROL_BITPOS,
ICP_QAT_HW_COMP_20_CONFIG_CSR_DISABLE_TOKEN_FUSION_CONTROL_MASK);
QAT_FIELD_SET(val32, csr.lbms,
ICP_QAT_HW_COMP_20_CONFIG_CSR_LBMS_BITPOS,
ICP_QAT_HW_COMP_20_CONFIG_CSR_LBMS_MASK);
QAT_FIELD_SET(val32, csr.scb_mode_reset,
ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_MODE_RESET_MASK_BITPOS,
ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_MODE_RESET_MASK_MASK);
QAT_FIELD_SET(val32, csr.lazy,
ICP_QAT_HW_COMP_20_CONFIG_CSR_LAZY_PARAM_BITPOS,
ICP_QAT_HW_COMP_20_CONFIG_CSR_LAZY_PARAM_MASK);
QAT_FIELD_SET(val32, csr.nice,
ICP_QAT_HW_COMP_20_CONFIG_CSR_NICE_PARAM_BITPOS,
ICP_QAT_HW_COMP_20_CONFIG_CSR_NICE_PARAM_MASK);
return __builtin_bswap32(val32);
}
struct icp_qat_hw_decomp_20_config_csr_lower {
enum icp_qat_hw_decomp_20_hbs_control hbs;
enum icp_qat_hw_decomp_20_lbms lbms;
enum icp_qat_hw_decomp_20_hw_comp_format algo;
enum icp_qat_hw_decomp_20_min_match_control mmctrl;
enum icp_qat_hw_decomp_20_lz4_block_checksum_present lbc;
};
static inline __u32
ICP_QAT_FW_DECOMP_20_BUILD_CONFIG_LOWER(struct icp_qat_hw_decomp_20_config_csr_lower csr)
{
u32 val32 = 0;
QAT_FIELD_SET(val32, csr.hbs,
ICP_QAT_HW_DECOMP_20_CONFIG_CSR_HBS_CONTROL_BITPOS,
ICP_QAT_HW_DECOMP_20_CONFIG_CSR_HBS_CONTROL_MASK);
QAT_FIELD_SET(val32, csr.lbms,
ICP_QAT_HW_DECOMP_20_CONFIG_CSR_LBMS_BITPOS,
ICP_QAT_HW_DECOMP_20_CONFIG_CSR_LBMS_MASK);
QAT_FIELD_SET(val32, csr.algo,
ICP_QAT_HW_DECOMP_20_CONFIG_CSR_HW_DECOMP_FORMAT_BITPOS,
ICP_QAT_HW_DECOMP_20_CONFIG_CSR_HW_DECOMP_FORMAT_MASK);
QAT_FIELD_SET(val32, csr.mmctrl,
ICP_QAT_HW_DECOMP_20_CONFIG_CSR_MIN_MATCH_CONTROL_BITPOS,
ICP_QAT_HW_DECOMP_20_CONFIG_CSR_MIN_MATCH_CONTROL_MASK);
QAT_FIELD_SET(val32, csr.lbc,
ICP_QAT_HW_DECOMP_20_CONFIG_CSR_LZ4_BLOCK_CHECKSUM_PRESENT_BITPOS,
ICP_QAT_HW_DECOMP_20_CONFIG_CSR_LZ4_BLOCK_CHECKSUM_PRESENT_MASK);
return __builtin_bswap32(val32);
}
struct icp_qat_hw_decomp_20_config_csr_upper {
enum icp_qat_hw_decomp_20_speculative_decoder_control sdc;
enum icp_qat_hw_decomp_20_mini_cam_control mcc;
};
static inline __u32
ICP_QAT_FW_DECOMP_20_BUILD_CONFIG_UPPER(struct icp_qat_hw_decomp_20_config_csr_upper csr)
{
u32 val32 = 0;
QAT_FIELD_SET(val32, csr.sdc,
ICP_QAT_HW_DECOMP_20_CONFIG_CSR_SPECULATIVE_DECODER_CONTROL_BITPOS,
ICP_QAT_HW_DECOMP_20_CONFIG_CSR_SPECULATIVE_DECODER_CONTROL_MASK);
QAT_FIELD_SET(val32, csr.mcc,
ICP_QAT_HW_DECOMP_20_CONFIG_CSR_MINI_CAM_CONTROL_BITPOS,
ICP_QAT_HW_DECOMP_20_CONFIG_CSR_MINI_CAM_CONTROL_MASK);
return __builtin_bswap32(val32);
}
#endif