Commit d9fb8408 authored by Damian Muszynski, committed by Herbert Xu

crypto: qat - add rate limiting feature to qat_4xxx

The Rate Limiting (RL) feature allows controlling the rate of requests
that can be submitted on a ring pair (RP). This makes it possible to share
a QAT device among multiple users while guaranteeing throughput.

The driver provides a mechanism that allows users to set policies, which
are then programmed into the device. The device enforces those policies.

Configuration of RL is accomplished through entities called SLAs
(Service Level Agreements). Each SLA object gets a unique identifier
and defines the limitations for a single service across up to four
ring pairs (the number of RPs allocated to a single VF).

The rate is determined using two fields:
  * CIR (Committed Information Rate), i.e., the guaranteed rate.
  * PIR (Peak Information Rate), i.e., the maximum rate achievable
    when the device has available resources.
Rate values are expressed in permille, i.e. on a 0-1000 scale.
Ring pair selection is achieved by providing a 64-bit mask, where
each bit corresponds to one of the ring pairs.

This adds an interface and the logic to add, update, retrieve and
remove an SLA.
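
As an illustration, here is a minimal usage sketch (not part of this
patch): it creates a leaf SLA for the symmetric crypto service on ring
pairs 0 and 2, assuming an already initialized accel_dev; the helper
name is made up.

#include <linux/bits.h>
#include "adf_rl.h"

/* Hypothetical helper: guarantee 50% of the sym service throughput
 * (CIR = 500 permille) with bursts up to 100% (PIR = 1000) on RP0
 * and RP2, under the default parent.
 */
static int example_add_sym_sla(struct adf_accel_dev *accel_dev)
{
        struct adf_rl_sla_input_data sla_in = {
                .rp_mask = BIT(0) | BIT(2),        /* 64-bit RP mask */
                .parent_id = RL_PARENT_DEFAULT_ID,
                .cir = 500,        /* permille of max service throughput */
                .pir = 1000,
                .type = RL_LEAF,
                .srv = ADF_SVC_SYM,
        };
        int ret;

        ret = adf_rl_add_sla(accel_dev, &sla_in);
        if (ret)
                return ret;

        /* On success, sla_in.sla_id holds the ID of the new SLA. */
        return sla_in.sla_id;
}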
Signed-off-by: Damian Muszynski <damian.muszynski@intel.com>
Reviewed-by: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
Reviewed-by: Tero Kristo <tero.kristo@linux.intel.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent c7fd5379
@@ -343,6 +343,24 @@ static u32 get_heartbeat_clock(struct adf_hw_device_data *self)
return ADF_4XXX_KPT_COUNTER_FREQ;
}
static void adf_init_rl_data(struct adf_rl_hw_data *rl_data)
{
rl_data->pciout_tb_offset = ADF_GEN4_RL_TOKEN_PCIEOUT_BUCKET_OFFSET;
rl_data->pciin_tb_offset = ADF_GEN4_RL_TOKEN_PCIEIN_BUCKET_OFFSET;
rl_data->r2l_offset = ADF_GEN4_RL_R2L_OFFSET;
rl_data->l2c_offset = ADF_GEN4_RL_L2C_OFFSET;
rl_data->c2s_offset = ADF_GEN4_RL_C2S_OFFSET;
rl_data->pcie_scale_div = ADF_4XXX_RL_PCIE_SCALE_FACTOR_DIV;
rl_data->pcie_scale_mul = ADF_4XXX_RL_PCIE_SCALE_FACTOR_MUL;
rl_data->dcpr_correction = ADF_4XXX_RL_DCPR_CORRECTION;
rl_data->max_tp[ADF_SVC_ASYM] = ADF_4XXX_RL_MAX_TP_ASYM;
rl_data->max_tp[ADF_SVC_SYM] = ADF_4XXX_RL_MAX_TP_SYM;
rl_data->max_tp[ADF_SVC_DC] = ADF_4XXX_RL_MAX_TP_DC;
rl_data->scan_interval = ADF_4XXX_RL_SCANS_PER_SEC;
rl_data->scale_ref = ADF_4XXX_RL_SLICE_REF;
}
static void adf_enable_error_correction(struct adf_accel_dev *accel_dev)
{
struct adf_bar *misc_bar = &GET_BARS(accel_dev)[ADF_4XXX_PMISC_BAR];
@@ -594,12 +612,14 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id)
hw_data->stop_timer = adf_gen4_timer_stop;
hw_data->get_hb_clock = get_heartbeat_clock;
hw_data->num_hb_ctrs = ADF_NUM_HB_CNT_PER_AE;
hw_data->clock_frequency = ADF_4XXX_AE_FREQ;
adf_gen4_set_err_mask(&hw_data->dev_err_mask);
adf_gen4_init_hw_csr_ops(&hw_data->csr_ops);
adf_gen4_init_pf_pfvf_ops(&hw_data->pfvf_ops);
adf_gen4_init_dc_ops(&hw_data->dc_ops);
adf_gen4_init_ras_ops(&hw_data->ras_ops);
adf_init_rl_data(&hw_data->rl_data);
}
void adf_clean_hw_data_4xxx(struct adf_hw_device_data *hw_data)
...
@@ -82,8 +82,19 @@
#define ADF_402XX_ASYM_OBJ "qat_402xx_asym.bin"
#define ADF_402XX_ADMIN_OBJ "qat_402xx_admin.bin"
/* RL constants */
#define ADF_4XXX_RL_PCIE_SCALE_FACTOR_DIV 100
#define ADF_4XXX_RL_PCIE_SCALE_FACTOR_MUL 102
#define ADF_4XXX_RL_DCPR_CORRECTION 1
#define ADF_4XXX_RL_SCANS_PER_SEC 954
#define ADF_4XXX_RL_MAX_TP_ASYM 173750UL
#define ADF_4XXX_RL_MAX_TP_SYM 95000UL
#define ADF_4XXX_RL_MAX_TP_DC 45000UL
#define ADF_4XXX_RL_SLICE_REF 1000UL
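/*
 * Illustrative note (not part of the patch): with rates expressed in
 * permille, a CIR of 500 for the asym service corresponds to
 * 500 * ADF_4XXX_RL_MAX_TP_ASYM / 1000 = 86875 in the same
 * device-specific throughput units as the constant above.
 */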
/* Clocks frequency */
#define ADF_4XXX_KPT_COUNTER_FREQ (100 * HZ_PER_MHZ)
#define ADF_4XXX_AE_FREQ (1000 * HZ_PER_MHZ)
/* qat_4xxx fuse bits are different from old GENs, redefine them */
enum icp_qat_4xxx_slice_mask {
...
@@ -28,6 +28,8 @@ intel_qat-objs := adf_cfg.o \
qat_algs.o \
qat_asym_algs.o \
qat_algs_send.o \
adf_rl.o \
adf_rl_admin.o \
qat_uclo.o \
qat_hal.o \
qat_bl.o
...
@@ -9,6 +9,7 @@
#include <linux/ratelimit.h>
#include <linux/types.h>
#include "adf_cfg_common.h"
#include "adf_rl.h"
#include "adf_pfvf_msg.h"
#define ADF_DH895XCC_DEVICE_NAME "dh895xcc"
@@ -247,6 +248,7 @@ struct adf_hw_device_data {
struct adf_dc_ops dc_ops;
struct adf_ras_ops ras_ops;
struct adf_dev_err_mask dev_err_mask;
struct adf_rl_hw_data rl_data;
const char *fw_name;
const char *fw_mmp_name;
u32 fuses;
@@ -358,6 +360,7 @@ struct adf_accel_dev {
struct adf_accel_pci accel_pci_dev;
struct adf_timer *timer;
struct adf_heartbeat *heartbeat;
struct adf_rl *rate_limiting;
union {
struct {
/* protects VF2PF interrupts access */
...
@@ -330,6 +330,53 @@ static int adf_get_fw_capabilities(struct adf_accel_dev *accel_dev, u16 *caps)
return 0;
}
int adf_send_admin_rl_init(struct adf_accel_dev *accel_dev,
struct icp_qat_fw_init_admin_slice_cnt *slices)
{
u32 ae_mask = accel_dev->hw_device->admin_ae_mask;
struct icp_qat_fw_init_admin_resp resp = { };
struct icp_qat_fw_init_admin_req req = { };
int ret;
req.cmd_id = ICP_QAT_FW_RL_INIT;
ret = adf_send_admin(accel_dev, &req, &resp, ae_mask);
if (ret)
return ret;
memcpy(slices, &resp.slices, sizeof(*slices));
return 0;
}
int adf_send_admin_rl_add_update(struct adf_accel_dev *accel_dev,
struct icp_qat_fw_init_admin_req *req)
{
u32 ae_mask = accel_dev->hw_device->admin_ae_mask;
struct icp_qat_fw_init_admin_resp resp = { };
/*
* The req struct is filled in by the rl implementation. Commands used:
* ICP_QAT_FW_RL_ADD to add a new SLA
* ICP_QAT_FW_RL_UPDATE to update an existing SLA
*/
return adf_send_admin(accel_dev, req, &resp, ae_mask);
}
int adf_send_admin_rl_delete(struct adf_accel_dev *accel_dev, u16 node_id,
u8 node_type)
{
u32 ae_mask = accel_dev->hw_device->admin_ae_mask;
struct icp_qat_fw_init_admin_resp resp = { };
struct icp_qat_fw_init_admin_req req = { };
req.cmd_id = ICP_QAT_FW_RL_REMOVE;
req.node_id = node_id;
req.node_type = node_type;
return adf_send_admin(accel_dev, &req, &resp, ae_mask);
}
/**
* adf_send_admin_init() - Function sends init message to FW
* @accel_dev: Pointer to acceleration device.
...
@@ -3,6 +3,8 @@
#ifndef ADF_ADMIN
#define ADF_ADMIN
#include "icp_qat_fw_init_admin.h"
struct adf_accel_dev;
int adf_init_admin_comms(struct adf_accel_dev *accel_dev);
@@ -12,6 +14,12 @@ int adf_get_ae_fw_counters(struct adf_accel_dev *accel_dev, u16 ae, u64 *reqs, u
int adf_init_admin_pm(struct adf_accel_dev *accel_dev, u32 idle_delay);
int adf_send_admin_tim_sync(struct adf_accel_dev *accel_dev, u32 cnt);
int adf_send_admin_hb_timer(struct adf_accel_dev *accel_dev, uint32_t ticks);
int adf_send_admin_rl_init(struct adf_accel_dev *accel_dev,
struct icp_qat_fw_init_admin_slice_cnt *slices);
int adf_send_admin_rl_add_update(struct adf_accel_dev *accel_dev,
struct icp_qat_fw_init_admin_req *req);
int adf_send_admin_rl_delete(struct adf_accel_dev *accel_dev, u16 node_id,
u8 node_type);
int adf_get_fw_timestamp(struct adf_accel_dev *accel_dev, u64 *timestamp);
int adf_get_pm_info(struct adf_accel_dev *accel_dev, dma_addr_t p_state_addr, size_t buff_size);
int adf_get_cnv_stats(struct adf_accel_dev *accel_dev, u16 ae, u16 *err_cnt, u16 *latest_err);
...
@@ -139,6 +139,13 @@ do { \
/* Number of heartbeat counter pairs */
#define ADF_NUM_HB_CNT_PER_AE ADF_NUM_THREADS_PER_AE
/* Rate Limiting */
#define ADF_GEN4_RL_R2L_OFFSET 0x508000
#define ADF_GEN4_RL_L2C_OFFSET 0x509000
#define ADF_GEN4_RL_C2S_OFFSET 0x508818
#define ADF_GEN4_RL_TOKEN_PCIEIN_BUCKET_OFFSET 0x508800
#define ADF_GEN4_RL_TOKEN_PCIEOUT_BUCKET_OFFSET 0x508804
void adf_gen4_set_ssm_wdtimer(struct adf_accel_dev *accel_dev);
void adf_gen4_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops);
int adf_gen4_ring_pair_reset(struct adf_accel_dev *accel_dev, u32 bank_number);
...
@@ -9,6 +9,7 @@
#include "adf_common_drv.h"
#include "adf_dbgfs.h"
#include "adf_heartbeat.h"
#include "adf_rl.h"
#include "adf_sysfs_ras_counters.h"
static LIST_HEAD(service_table);
@@ -137,6 +138,9 @@ static int adf_dev_init(struct adf_accel_dev *accel_dev)
}
adf_heartbeat_init(accel_dev);
ret = adf_rl_init(accel_dev);
if (ret && ret != -EOPNOTSUPP)
return ret;
/*
* Subservice initialisation is divided into two stages: init and start.
@@ -212,6 +216,9 @@ static int adf_dev_start(struct adf_accel_dev *accel_dev)
}
adf_heartbeat_start(accel_dev);
ret = adf_rl_start(accel_dev);
if (ret && ret != -EOPNOTSUPP)
return ret;
list_for_each_entry(service, &service_table, list) {
if (service->event_hld(accel_dev, ADF_EVENT_START)) {
@@ -272,6 +279,7 @@ static void adf_dev_stop(struct adf_accel_dev *accel_dev)
!test_bit(ADF_STATUS_STARTING, &accel_dev->status))
return;
adf_rl_stop(accel_dev);
adf_dbgfs_rm(accel_dev);
adf_sysfs_stop_ras(accel_dev);
@@ -359,6 +367,8 @@ static void adf_dev_shutdown(struct adf_accel_dev *accel_dev)
clear_bit(accel_dev->accel_id, service->init_status);
}
adf_rl_exit(accel_dev);
if (hw_data->ras_ops.disable_ras_errors)
hw_data->ras_ops.disable_ras_errors(accel_dev);
...
/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright(c) 2023 Intel Corporation */
#ifndef ADF_RL_H_
#define ADF_RL_H_
#include <linux/mutex.h>
#include <linux/types.h>
struct adf_accel_dev;
#define RL_ROOT_MAX 4
#define RL_CLUSTER_MAX 16
#define RL_LEAF_MAX 64
#define RL_NODES_CNT_MAX (RL_ROOT_MAX + RL_CLUSTER_MAX + RL_LEAF_MAX)
#define RL_RP_CNT_PER_LEAF_MAX 4U
#define RL_RP_CNT_MAX 64
#define RL_SLA_EMPTY_ID -1
#define RL_PARENT_DEFAULT_ID -1
enum rl_node_type {
RL_ROOT,
RL_CLUSTER,
RL_LEAF,
};
enum adf_base_services {
ADF_SVC_ASYM = 0,
ADF_SVC_SYM,
ADF_SVC_DC,
ADF_SVC_NONE,
};
/**
* struct adf_rl_sla_input_data - ratelimiting user input data structure
* @rp_mask: 64-bit bitmask of ring pair IDs which will be assigned to the SLA.
* E.g. 0x5 -> RP0 and RP2 assigned; 0xA005 -> RP0, 2, 13 and 15 assigned.
* @sla_id: ID of the current SLA for the update, rm and get operations. For the
* add operation, this field is updated with the ID of the newly
* added SLA.
* @parent_id: ID of the SLA to which the current one should be assigned.
* Set to -1 to refer to the default parent.
* @cir: Committed information rate. Rate guaranteed to be achieved. Input value
* is expressed in permille scale, i.e. 1000 refers to the maximum
* device throughput for a selected service.
* @pir: Peak information rate. Maximum rate available that the SLA can achieve.
* Input value is expressed in permille scale, i.e. 1000 refers to
* the maximum device throughput for a selected service.
* @type: SLA type: root, cluster, leaf
* @srv: Service associated with the SLA: asym, sym, dc.
*
* This structure is used to perform operations on an SLA.
* Depending on the operation, some of the parameters are ignored.
* The following list reports which parameters should be set for each operation.
* - add: all except sla_id
* - update: cir, pir, sla_id
* - rm: sla_id
* - rm_all: -
* - get: sla_id
* - get_capability_rem: srv, sla_id
*/
struct adf_rl_sla_input_data {
u64 rp_mask;
int sla_id;
int parent_id;
unsigned int cir;
unsigned int pir;
enum rl_node_type type;
enum adf_base_services srv;
};
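/*
 * Example (illustrative, not part of the patch): per the list above, an
 * update operation only needs sla_id, cir and pir; the other fields are
 * ignored:
 *
 *	struct adf_rl_sla_input_data in = { .sla_id = id, .cir = 250, .pir = 500 };
 *	ret = adf_rl_update_sla(accel_dev, &in);
 */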
struct rl_slice_cnt {
u8 dcpr_cnt;
u8 pke_cnt;
u8 cph_cnt;
};
struct adf_rl_hw_data {
u32 scale_ref;
u32 scan_interval;
u32 r2l_offset;
u32 l2c_offset;
u32 c2s_offset;
u32 pciin_tb_offset;
u32 pciout_tb_offset;
u32 pcie_scale_mul;
u32 pcie_scale_div;
u32 dcpr_correction;
u32 max_tp[RL_ROOT_MAX];
struct rl_slice_cnt slices;
};
/**
* struct adf_rl - ratelimiting data structure
* @accel_dev: pointer to acceleration device data
* @device_data: pointer to rate limiting data specific to a device type (or revision)
* @sla: array of pointers to SLA objects
* @root: array of pointers to root type SLAs, element number reflects node_id
* @cluster: array of pointers to cluster type SLAs, element number reflects node_id
* @leaf: array of pointers to leaf type SLAs, element number reflects node_id
* @rp_in_use: array of ring pair IDs already used in one of SLAs
* @rl_lock: mutex protecting the data in this structure
*/
struct adf_rl {
struct adf_accel_dev *accel_dev;
struct adf_rl_hw_data *device_data;
/* mapping sla_id to SLA objects */
struct rl_sla *sla[RL_NODES_CNT_MAX];
struct rl_sla *root[RL_ROOT_MAX];
struct rl_sla *cluster[RL_CLUSTER_MAX];
struct rl_sla *leaf[RL_LEAF_MAX];
bool rp_in_use[RL_RP_CNT_MAX];
/* Mutex protecting writing to SLAs lists */
struct mutex rl_lock;
};
/**
* struct rl_sla - SLA object data structure
* @parent: pointer to the parent SLA (root/cluster)
* @type: SLA type
* @srv: service associated with this SLA
* @sla_id: ID of the SLA, used as element number in SLA array and as identifier
* shared with the user
* @node_id: ID of the node; each SLA type has a separate ID list
* @cir: committed information rate
* @pir: peak information rate (PIR >= CIR)
* @rem_cir: if this SLA is a parent, this field holds the remaining rate
* available to its child SLAs
* @ring_pairs_ids: array with numeric ring pairs IDs assigned to this SLA
* @ring_pairs_cnt: number of assigned ring pairs listed in the array above
*/
struct rl_sla {
struct rl_sla *parent;
enum rl_node_type type;
enum adf_base_services srv;
u32 sla_id;
u32 node_id;
u32 cir;
u32 pir;
u32 rem_cir;
u16 ring_pairs_ids[RL_RP_CNT_PER_LEAF_MAX];
u16 ring_pairs_cnt;
};
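/*
 * Illustrative note (not part of the patch): SLAs form a tree. A root
 * SLA caps a whole service, cluster SLAs subdivide their root's rate,
 * and leaf SLAs attach up to four ring pairs to a cluster. A parent's
 * rem_cir shrinks as its children reserve CIR from it.
 */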
int adf_rl_add_sla(struct adf_accel_dev *accel_dev,
struct adf_rl_sla_input_data *sla_in);
int adf_rl_update_sla(struct adf_accel_dev *accel_dev,
struct adf_rl_sla_input_data *sla_in);
int adf_rl_get_sla(struct adf_accel_dev *accel_dev,
struct adf_rl_sla_input_data *sla_in);
int adf_rl_get_capability_remaining(struct adf_accel_dev *accel_dev,
enum adf_base_services srv, int sla_id);
int adf_rl_remove_sla(struct adf_accel_dev *accel_dev, u32 sla_id);
void adf_rl_remove_sla_all(struct adf_accel_dev *accel_dev, bool incl_default);
int adf_rl_init(struct adf_accel_dev *accel_dev);
int adf_rl_start(struct adf_accel_dev *accel_dev);
void adf_rl_stop(struct adf_accel_dev *accel_dev);
void adf_rl_exit(struct adf_accel_dev *accel_dev);
u32 adf_rl_calculate_pci_bw(struct adf_accel_dev *accel_dev, u32 sla_val,
enum adf_base_services svc_type, bool is_bw_out);
u32 adf_rl_calculate_ae_cycles(struct adf_accel_dev *accel_dev, u32 sla_val,
enum adf_base_services svc_type);
u32 adf_rl_calculate_slice_tokens(struct adf_accel_dev *accel_dev, u32 sla_val,
enum adf_base_services svc_type);
#endif /* ADF_RL_H_ */
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2023 Intel Corporation */
#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include "adf_admin.h"
#include "adf_accel_devices.h"
#include "adf_rl_admin.h"
static void
prep_admin_req_msg(struct rl_sla *sla, dma_addr_t dma_addr,
struct icp_qat_fw_init_admin_sla_config_params *fw_params,
struct icp_qat_fw_init_admin_req *req, bool is_update)
{
req->cmd_id = is_update ? ICP_QAT_FW_RL_UPDATE : ICP_QAT_FW_RL_ADD;
req->init_cfg_ptr = dma_addr;
req->init_cfg_sz = sizeof(*fw_params);
req->node_id = sla->node_id;
req->node_type = sla->type;
req->rp_count = sla->ring_pairs_cnt;
req->svc_type = sla->srv;
}
static void
prep_admin_req_params(struct adf_accel_dev *accel_dev, struct rl_sla *sla,
struct icp_qat_fw_init_admin_sla_config_params *fw_params)
{
fw_params->pcie_in_cir =
adf_rl_calculate_pci_bw(accel_dev, sla->cir, sla->srv, false);
fw_params->pcie_in_pir =
adf_rl_calculate_pci_bw(accel_dev, sla->pir, sla->srv, false);
fw_params->pcie_out_cir =
adf_rl_calculate_pci_bw(accel_dev, sla->cir, sla->srv, true);
fw_params->pcie_out_pir =
adf_rl_calculate_pci_bw(accel_dev, sla->pir, sla->srv, true);
fw_params->slice_util_cir =
adf_rl_calculate_slice_tokens(accel_dev, sla->cir, sla->srv);
fw_params->slice_util_pir =
adf_rl_calculate_slice_tokens(accel_dev, sla->pir, sla->srv);
fw_params->ae_util_cir =
adf_rl_calculate_ae_cycles(accel_dev, sla->cir, sla->srv);
fw_params->ae_util_pir =
adf_rl_calculate_ae_cycles(accel_dev, sla->pir, sla->srv);
memcpy(fw_params->rp_ids, sla->ring_pairs_ids,
sizeof(sla->ring_pairs_ids));
}
int adf_rl_send_admin_init_msg(struct adf_accel_dev *accel_dev,
struct rl_slice_cnt *slices_int)
{
struct icp_qat_fw_init_admin_slice_cnt slices_resp = { };
int ret;
ret = adf_send_admin_rl_init(accel_dev, &slices_resp);
if (ret)
return ret;
slices_int->dcpr_cnt = slices_resp.dcpr_cnt;
slices_int->pke_cnt = slices_resp.pke_cnt;
/* For symmetric crypto, slice tokens are relative to the UCS slice */
slices_int->cph_cnt = slices_resp.ucs_cnt;
return 0;
}
int adf_rl_send_admin_add_update_msg(struct adf_accel_dev *accel_dev,
struct rl_sla *sla, bool is_update)
{
struct icp_qat_fw_init_admin_sla_config_params *fw_params;
struct icp_qat_fw_init_admin_req req = { };
dma_addr_t dma_addr;
int ret;
fw_params = dma_alloc_coherent(&GET_DEV(accel_dev), sizeof(*fw_params),
&dma_addr, GFP_KERNEL);
if (!fw_params)
return -ENOMEM;
prep_admin_req_params(accel_dev, sla, fw_params);
prep_admin_req_msg(sla, dma_addr, fw_params, &req, is_update);
ret = adf_send_admin_rl_add_update(accel_dev, &req);
dma_free_coherent(&GET_DEV(accel_dev), sizeof(*fw_params), fw_params,
dma_addr);
return ret;
}
int adf_rl_send_admin_delete_msg(struct adf_accel_dev *accel_dev, u16 node_id,
u8 node_type)
{
return adf_send_admin_rl_delete(accel_dev, node_id, node_type);
}
/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright(c) 2023 Intel Corporation */
#ifndef ADF_RL_ADMIN_H_
#define ADF_RL_ADMIN_H_
#include <linux/types.h>
#include "adf_rl.h"
int adf_rl_send_admin_init_msg(struct adf_accel_dev *accel_dev,
struct rl_slice_cnt *slices_int);
int adf_rl_send_admin_add_update_msg(struct adf_accel_dev *accel_dev,
struct rl_sla *sla, bool is_update);
int adf_rl_send_admin_delete_msg(struct adf_accel_dev *accel_dev, u16 node_id,
u8 node_type);
#endif /* ADF_RL_ADMIN_H_ */
@@ -5,6 +5,8 @@
#include "icp_qat_fw.h"
#define RL_MAX_RP_IDS 16
enum icp_qat_fw_init_admin_cmd_id {
ICP_QAT_FW_INIT_AE = 0,
ICP_QAT_FW_TRNG_ENABLE = 1,
@@ -19,10 +21,14 @@ enum icp_qat_fw_init_admin_cmd_id {
ICP_QAT_FW_CRYPTO_CAPABILITY_GET = 10,
ICP_QAT_FW_DC_CHAIN_INIT = 11,
ICP_QAT_FW_HEARTBEAT_TIMER_SET = 13,
ICP_QAT_FW_RL_INIT = 15,
ICP_QAT_FW_TIMER_GET = 19,
ICP_QAT_FW_CNV_STATS_GET = 20,
ICP_QAT_FW_PM_STATE_CONFIG = 128,
ICP_QAT_FW_PM_INFO = 129,
ICP_QAT_FW_RL_ADD = 134,
ICP_QAT_FW_RL_UPDATE = 135,
ICP_QAT_FW_RL_REMOVE = 136,
};
enum icp_qat_fw_init_admin_resp_status {
@@ -30,6 +36,30 @@ enum icp_qat_fw_init_admin_resp_status {
ICP_QAT_FW_INIT_RESP_STATUS_FAIL
};
struct icp_qat_fw_init_admin_slice_cnt {
__u8 cpr_cnt;
__u8 xlt_cnt;
__u8 dcpr_cnt;
__u8 pke_cnt;
__u8 wat_cnt;
__u8 wcp_cnt;
__u8 ucs_cnt;
__u8 cph_cnt;
__u8 ath_cnt;
};
struct icp_qat_fw_init_admin_sla_config_params {
__u32 pcie_in_cir;
__u32 pcie_in_pir;
__u32 pcie_out_cir;
__u32 pcie_out_pir;
__u32 slice_util_cir;
__u32 slice_util_pir;
__u32 ae_util_cir;
__u32 ae_util_pir;
__u16 rp_ids[RL_MAX_RP_IDS];
};
struct icp_qat_fw_init_admin_req {
__u16 init_cfg_sz;
__u8 resrvd1;
@@ -49,6 +79,13 @@ struct icp_qat_fw_init_admin_req {
struct {
__u32 heartbeat_ticks;
};
struct {
__u16 node_id;
__u8 node_type;
__u8 svc_type;
__u8 resrvd5[3];
__u8 rp_count;
};
__u32 idle_filter;
};
@@ -110,6 +147,7 @@ struct icp_qat_fw_init_admin_resp {
__u32 unsuccessful_count;
__u64 resrvd8;
};
struct icp_qat_fw_init_admin_slice_cnt slices;
__u16 fw_capabilities;
};
} __packed;
...