Commit fd77d8da authored by Damian Muszynski, committed by Herbert Xu

crypto: qat - add internal timer for qat 4xxx

The power management feature in QAT 4xxx devices can disable the clock
sources used to implement timers. Because of that, the firmware needs a
reliable external source of time.

Add a kernel delayed work that periodically sends a sync event to the
firmware. The work is re-queued every 200 ms. At each execution, the
driver reports to the firmware the number of timer periods elapsed since
the timer was started.

This is a prerequisite for enabling the heartbeat, telemetry and
rate limiting features.
Signed-off-by: Damian Muszynski <damian.muszynski@intel.com>
Reviewed-by: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
Reviewed-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent 865b50fe
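
For orientation, here is a minimal, self-contained sketch of the pattern this patch introduces: a delayed work item that re-arms itself every period and computes how many fixed-length periods have elapsed since it was started. All names in the sketch are illustrative; the actual implementation is in adf_gen4_timer.c below.

/* Illustrative sketch only - not the driver's code. */
#include <linux/container_of.h>
#include <linux/jiffies.h>
#include <linux/ktime.h>
#include <linux/math64.h>
#include <linux/workqueue.h>

#define DEMO_PERIOD_MS 200

struct demo_timer {
	struct delayed_work work;
	ktime_t start;
};

static void demo_tick(struct work_struct *work)
{
	struct demo_timer *t = container_of(to_delayed_work(work),
					    struct demo_timer, work);
	u32 periods;

	/* Re-arm first so the period is not skewed by the work done below. */
	schedule_delayed_work(&t->work, msecs_to_jiffies(DEMO_PERIOD_MS));

	/* Number of whole periods elapsed since the timer was started. */
	periods = div_u64(ktime_ms_delta(ktime_get_real(), t->start),
			  DEMO_PERIOD_MS);

	/* In the QAT driver this count is sent to the firmware. */
	(void)periods;
}

The driver queues the real work on its own miscellaneous workqueue rather than the system one; see adf_misc_wq_queue_delayed_work() further down.
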
@@ -8,6 +8,7 @@
#include <adf_gen4_hw_data.h>
#include <adf_gen4_pfvf.h>
#include <adf_gen4_pm.h>
#include <adf_gen4_timer.h>
#include "adf_4xxx_hw_data.h"
#include "icp_qat_hw.h"
@@ -508,6 +509,8 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id)
	hw_data->enable_pm = adf_gen4_enable_pm;
	hw_data->handle_pm_interrupt = adf_gen4_handle_pm_interrupt;
	hw_data->dev_config = adf_gen4_dev_config;
	hw_data->start_timer = adf_gen4_timer_start;
	hw_data->stop_timer = adf_gen4_timer_stop;
	adf_gen4_init_hw_csr_ops(&hw_data->csr_ops);
	adf_gen4_init_pf_pfvf_ops(&hw_data->pfvf_ops);
......
@@ -17,6 +17,7 @@ intel_qat-objs := adf_cfg.o \
	adf_gen4_pm.o \
	adf_gen2_dc.o \
	adf_gen4_dc.o \
	adf_gen4_timer.o \
	qat_crypto.o \
	qat_compression.o \
	qat_comp_algs.o \
......
@@ -188,6 +188,8 @@ struct adf_hw_device_data {
	int (*init_admin_comms)(struct adf_accel_dev *accel_dev);
	void (*exit_admin_comms)(struct adf_accel_dev *accel_dev);
	int (*send_admin_init)(struct adf_accel_dev *accel_dev);
	int (*start_timer)(struct adf_accel_dev *accel_dev);
	void (*stop_timer)(struct adf_accel_dev *accel_dev);
	int (*init_arb)(struct adf_accel_dev *accel_dev);
	void (*exit_arb)(struct adf_accel_dev *accel_dev);
	const u32 *(*get_arb_mapping)(struct adf_accel_dev *accel_dev);
@@ -296,6 +298,7 @@ struct adf_accel_dev {
	struct list_head list;
	struct module *owner;
	struct adf_accel_pci accel_pci_dev;
	struct adf_timer *timer;
	union {
		struct {
			/* protects VF2PF interrupts access */
......
@@ -241,6 +241,18 @@ int adf_get_ae_fw_counters(struct adf_accel_dev *accel_dev, u16 ae, u64 *reqs, u
	return 0;
}

int adf_send_admin_tim_sync(struct adf_accel_dev *accel_dev, u32 cnt)
{
	u32 ae_mask = accel_dev->hw_device->ae_mask;
	struct icp_qat_fw_init_admin_req req = { };
	struct icp_qat_fw_init_admin_resp resp = { };

	req.cmd_id = ICP_QAT_FW_SYNC;
	req.int_timer_ticks = cnt;

	return adf_send_admin(accel_dev, &req, &resp, ae_mask);
}

/**
 * adf_send_admin_init() - Function sends init message to FW
 * @accel_dev: Pointer to acceleration device.
......
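
The int_timer_ticks value carried by the ICP_QAT_FW_SYNC command is simply the number of 200 ms periods elapsed since the timer was started, so elapsed wall-clock time can be recovered by multiplying it back. A hypothetical helper, not part of the patch, to make the relationship explicit (the 200 ms period comes from ADF_GEN4_TIMER_PERIOD_MS in adf_gen4_timer.c below):

/* Hypothetical helper, not part of the patch: elapsed milliseconds implied
 * by a tick count, assuming the 200 ms period used by the gen4 timer.
 */
static inline u64 demo_tim_sync_ticks_to_ms(u32 int_timer_ticks)
{
	return (u64)int_timer_ticks * 200 /* ADF_GEN4_TIMER_PERIOD_MS */;
}

For example, a tick count of 18000 corresponds to 18000 * 200 ms = 3600 s, i.e. one hour of elapsed time.
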
@@ -96,6 +96,7 @@ void adf_exit_admin_comms(struct adf_accel_dev *accel_dev);
int adf_send_admin_init(struct adf_accel_dev *accel_dev);
int adf_get_ae_fw_counters(struct adf_accel_dev *accel_dev, u16 ae, u64 *reqs, u64 *resps);
int adf_init_admin_pm(struct adf_accel_dev *accel_dev, u32 idle_delay);
int adf_send_admin_tim_sync(struct adf_accel_dev *accel_dev, u32 cnt);
int adf_init_arb(struct adf_accel_dev *accel_dev);
void adf_exit_arb(struct adf_accel_dev *accel_dev);
void adf_update_ring_arb(struct adf_etr_ring_data *ring);
@@ -194,6 +195,8 @@ int qat_uclo_set_cfg_ae_mask(struct icp_qat_fw_loader_handle *handle,
int adf_init_misc_wq(void);
void adf_exit_misc_wq(void);
bool adf_misc_wq_queue_work(struct work_struct *work);
bool adf_misc_wq_queue_delayed_work(struct delayed_work *work,
				    unsigned long delay);
#if defined(CONFIG_PCI_IOV)
int adf_sriov_configure(struct pci_dev *pdev, int numvfs);
void adf_disable_sriov(struct adf_accel_dev *accel_dev);
......
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2023 Intel Corporation */

#include <linux/container_of.h>
#include <linux/dev_printk.h>
#include <linux/export.h>
#include <linux/jiffies.h>
#include <linux/ktime.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "adf_gen4_timer.h"

#define ADF_GEN4_TIMER_PERIOD_MS 200

/* This periodic update is used to trigger HB, RL & TL fw events */
static void work_handler(struct work_struct *work)
{
	struct adf_accel_dev *accel_dev;
	struct adf_timer *timer_ctx;
	u32 time_periods;

	timer_ctx = container_of(to_delayed_work(work), struct adf_timer, work_ctx);
	accel_dev = timer_ctx->accel_dev;

	adf_misc_wq_queue_delayed_work(&timer_ctx->work_ctx,
				       msecs_to_jiffies(ADF_GEN4_TIMER_PERIOD_MS));

	time_periods = div_u64(ktime_ms_delta(ktime_get_real(), timer_ctx->initial_ktime),
			       ADF_GEN4_TIMER_PERIOD_MS);

	if (adf_send_admin_tim_sync(accel_dev, time_periods))
		dev_err(&GET_DEV(accel_dev), "Failed to synchronize qat timer\n");
}

int adf_gen4_timer_start(struct adf_accel_dev *accel_dev)
{
	struct adf_timer *timer_ctx;

	timer_ctx = kzalloc(sizeof(*timer_ctx), GFP_KERNEL);
	if (!timer_ctx)
		return -ENOMEM;

	timer_ctx->accel_dev = accel_dev;
	accel_dev->timer = timer_ctx;
	timer_ctx->initial_ktime = ktime_get_real();

	INIT_DELAYED_WORK(&timer_ctx->work_ctx, work_handler);
	adf_misc_wq_queue_delayed_work(&timer_ctx->work_ctx,
				       msecs_to_jiffies(ADF_GEN4_TIMER_PERIOD_MS));

	return 0;
}
EXPORT_SYMBOL_GPL(adf_gen4_timer_start);

void adf_gen4_timer_stop(struct adf_accel_dev *accel_dev)
{
	struct adf_timer *timer_ctx = accel_dev->timer;

	if (!timer_ctx)
		return;

	cancel_delayed_work_sync(&timer_ctx->work_ctx);

	kfree(timer_ctx);
	accel_dev->timer = NULL;
}
EXPORT_SYMBOL_GPL(adf_gen4_timer_stop);
......
/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright(c) 2023 Intel Corporation */
#ifndef ADF_GEN4_TIMER_H_
#define ADF_GEN4_TIMER_H_

#include <linux/ktime.h>
#include <linux/workqueue.h>

struct adf_accel_dev;

struct adf_timer {
	struct adf_accel_dev *accel_dev;
	struct delayed_work work_ctx;
	ktime_t initial_ktime;
};

int adf_gen4_timer_start(struct adf_accel_dev *accel_dev);
void adf_gen4_timer_stop(struct adf_accel_dev *accel_dev);

#endif /* ADF_GEN4_TIMER_H_ */
@@ -163,6 +163,7 @@ static int adf_dev_start(struct adf_accel_dev *accel_dev)
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct service_hndl *service;
	struct list_head *list_itr;
	int ret;

	set_bit(ADF_STATUS_STARTING, &accel_dev->status);
@@ -187,6 +188,14 @@ static int adf_dev_start(struct adf_accel_dev *accel_dev)
		return -EFAULT;
	}

	if (hw_data->start_timer) {
		ret = hw_data->start_timer(accel_dev);
		if (ret) {
			dev_err(&GET_DEV(accel_dev), "Failed to start internal sync timer\n");
			return ret;
		}
	}

	list_for_each(list_itr, &service_table) {
		service = list_entry(list_itr, struct service_hndl, list);
		if (service->event_hld(accel_dev, ADF_EVENT_START)) {
@@ -235,6 +244,7 @@ static int adf_dev_start(struct adf_accel_dev *accel_dev)
 */
static void adf_dev_stop(struct adf_accel_dev *accel_dev)
{
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct service_hndl *service;
	struct list_head *list_itr;
	bool wait = false;
@@ -270,6 +280,9 @@ static void adf_dev_stop(struct adf_accel_dev *accel_dev)
		}
	}

	if (hw_data->stop_timer)
		hw_data->stop_timer(accel_dev);

	if (wait)
		msleep(100);
......
@@ -380,3 +380,9 @@ bool adf_misc_wq_queue_work(struct work_struct *work)
{
	return queue_work(adf_misc_wq, work);
}

bool adf_misc_wq_queue_delayed_work(struct delayed_work *work,
				    unsigned long delay)
{
	return queue_delayed_work(adf_misc_wq, work, delay);
}
@@ -37,6 +37,9 @@ struct icp_qat_fw_init_admin_req {
			__u16 ibuf_size_in_kb;
			__u16 resrvd3;
		};
		struct {
			__u32 int_timer_ticks;
		};
		__u32 idle_filter;
	};
@@ -97,6 +100,8 @@ struct icp_qat_fw_init_admin_resp {
	};
} __packed;

#define ICP_QAT_FW_SYNC ICP_QAT_FW_HEARTBEAT_SYNC

#define ICP_QAT_FW_COMN_HEARTBEAT_OK 0
#define ICP_QAT_FW_COMN_HEARTBEAT_BLOCKED 1
#define ICP_QAT_FW_COMN_HEARTBEAT_FLAG_BITPOS 0
......