Commit 64a6301e authored by Hui Tang's avatar Hui Tang Committed by Herbert Xu

crypto: hisilicon/hpre - add debugfs for Hisilicon HPRE

Add debugfs entries that provide IO operation debug information,
and add a BD processing timeout counting function
Signed-off-by: Hui Tang <tanghui20@huawei.com>
Signed-off-by: Longfang Liu <liulongfang@huawei.com>
Signed-off-by: Shukun Tan <tanshukun1@huawei.com>
Reviewed-by: Zaibo Xu <xuzaibo@huawei.com>
Reviewed-by: Zhou Wang <wangzhou1@hisilicon.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent 8213a1a6
...@@ -93,3 +93,48 @@ Contact: linux-crypto@vger.kernel.org ...@@ -93,3 +93,48 @@ Contact: linux-crypto@vger.kernel.org
Description: Dump the status of the QM. Description: Dump the status of the QM.
Four states: initiated, started, stopped and closed. Four states: initiated, started, stopped and closed.
Available for both PF and VF, and take no other effect on HPRE. Available for both PF and VF, and take no other effect on HPRE.
What: /sys/kernel/debug/hisi_hpre/<bdf>/hpre_dfx/send_cnt
Date: Apr 2020
Contact: linux-crypto@vger.kernel.org
Description: Dump the total number of sent requests.
Available for both PF and VF, and take no other effect on HPRE.
What: /sys/kernel/debug/hisi_hpre/<bdf>/hpre_dfx/recv_cnt
Date: Apr 2020
Contact: linux-crypto@vger.kernel.org
Description: Dump the total number of received requests.
Available for both PF and VF, and take no other effect on HPRE.
What: /sys/kernel/debug/hisi_hpre/<bdf>/hpre_dfx/send_busy_cnt
Date: Apr 2020
Contact: linux-crypto@vger.kernel.org
Description: Dump the total number of requests sent
with returning busy.
Available for both PF and VF, and take no other effect on HPRE.
What: /sys/kernel/debug/hisi_hpre/<bdf>/hpre_dfx/send_fail_cnt
Date: Apr 2020
Contact: linux-crypto@vger.kernel.org
Description: Dump the total number of requests that completed with an error.
Available for both PF and VF, and take no other effect on HPRE.
What: /sys/kernel/debug/hisi_hpre/<bdf>/hpre_dfx/invalid_req_cnt
Date: Apr 2020
Contact: linux-crypto@vger.kernel.org
Description: Dump the total number of invalid requests being received.
Available for both PF and VF, and take no other effect on HPRE.
What: /sys/kernel/debug/hisi_hpre/<bdf>/hpre_dfx/overtime_thrhld
Date: Apr 2020
Contact: linux-crypto@vger.kernel.org
Description: Set the threshold time for counting the request which is
processed longer than the threshold.
0: disable(default), 1: 1 microsecond.
Available for both PF and VF, and take no other effect on HPRE.
What: /sys/kernel/debug/hisi_hpre/<bdf>/hpre_dfx/over_thrhld_cnt
Date: Apr 2020
Contact: linux-crypto@vger.kernel.org
Description: Dump the total number of time out requests.
Available for both PF and VF, and take no other effect on HPRE.
...@@ -25,6 +25,17 @@ enum hpre_ctrl_dbgfs_file { ...@@ -25,6 +25,17 @@ enum hpre_ctrl_dbgfs_file {
HPRE_DEBUG_FILE_NUM, HPRE_DEBUG_FILE_NUM,
}; };
/*
 * Per-device DFX (debug/statistics) items exposed under debugfs
 * (<debugfs>/hisi_hpre/<bdf>/hpre_dfx/). The order here must match the
 * hpre_dfx_files[] name table in the main driver file.
 */
enum hpre_dfx_dbgfs_file {
	HPRE_SEND_CNT,		/* total send attempts to hardware */
	HPRE_RECV_CNT,		/* total completions received back */
	HPRE_SEND_FAIL_CNT,	/* sends that finally failed (not -EBUSY) */
	HPRE_SEND_BUSY_CNT,	/* send attempts that returned -EBUSY */
	HPRE_OVER_THRHLD_CNT,	/* requests in flight longer than threshold */
	HPRE_OVERTIME_THRHLD,	/* timeout threshold; 0 disables counting */
	HPRE_INVALID_REQ_CNT,	/* completions with no matching request */
	HPRE_DFX_FILE_NUM
};
#define HPRE_DEBUGFS_FILE_NUM (HPRE_DEBUG_FILE_NUM + HPRE_CLUSTERS_NUM - 1) #define HPRE_DEBUGFS_FILE_NUM (HPRE_DEBUG_FILE_NUM + HPRE_CLUSTERS_NUM - 1)
struct hpre_debugfs_file { struct hpre_debugfs_file {
...@@ -34,6 +45,11 @@ struct hpre_debugfs_file { ...@@ -34,6 +45,11 @@ struct hpre_debugfs_file {
struct hpre_debug *debug; struct hpre_debug *debug;
}; };
/*
 * One debugfs-exported DFX item: the 64-bit value and which
 * hpre_dfx_dbgfs_file entry it backs (set at debugfs init time).
 */
struct hpre_dfx {
	atomic64_t value;
	enum hpre_dfx_dbgfs_file type;
};
/* /*
* One HPRE controller has one PF and multiple VFs, some global configurations * One HPRE controller has one PF and multiple VFs, some global configurations
* which PF has need this structure. * which PF has need this structure.
...@@ -41,6 +57,7 @@ struct hpre_debugfs_file { ...@@ -41,6 +57,7 @@ struct hpre_debugfs_file {
*/ */
struct hpre_debug { struct hpre_debug {
struct dentry *debug_root; struct dentry *debug_root;
struct hpre_dfx dfx[HPRE_DFX_FILE_NUM];
struct hpre_debugfs_file files[HPRE_DEBUGFS_FILE_NUM]; struct hpre_debugfs_file files[HPRE_DEBUGFS_FILE_NUM];
}; };
......
...@@ -10,6 +10,7 @@ ...@@ -10,6 +10,7 @@
#include <linux/dma-mapping.h> #include <linux/dma-mapping.h>
#include <linux/fips.h> #include <linux/fips.h>
#include <linux/module.h> #include <linux/module.h>
#include <linux/time.h>
#include "hpre.h" #include "hpre.h"
struct hpre_ctx; struct hpre_ctx;
...@@ -32,6 +33,9 @@ struct hpre_ctx; ...@@ -32,6 +33,9 @@ struct hpre_ctx;
#define HPRE_SQE_DONE_SHIFT 30 #define HPRE_SQE_DONE_SHIFT 30
#define HPRE_DH_MAX_P_SZ 512 #define HPRE_DH_MAX_P_SZ 512
#define HPRE_DFX_SEC_TO_US 1000000
#define HPRE_DFX_US_TO_NS 1000
typedef void (*hpre_cb)(struct hpre_ctx *ctx, void *sqe); typedef void (*hpre_cb)(struct hpre_ctx *ctx, void *sqe);
struct hpre_rsa_ctx { struct hpre_rsa_ctx {
...@@ -68,6 +72,7 @@ struct hpre_dh_ctx { ...@@ -68,6 +72,7 @@ struct hpre_dh_ctx {
struct hpre_ctx { struct hpre_ctx {
struct hisi_qp *qp; struct hisi_qp *qp;
struct hpre_asym_request **req_list; struct hpre_asym_request **req_list;
struct hpre *hpre;
spinlock_t req_lock; spinlock_t req_lock;
unsigned int key_sz; unsigned int key_sz;
bool crt_g2_mode; bool crt_g2_mode;
...@@ -90,6 +95,7 @@ struct hpre_asym_request { ...@@ -90,6 +95,7 @@ struct hpre_asym_request {
int err; int err;
int req_id; int req_id;
hpre_cb cb; hpre_cb cb;
struct timespec64 req_time;
}; };
static DEFINE_MUTEX(hpre_alg_lock); static DEFINE_MUTEX(hpre_alg_lock);
...@@ -119,6 +125,7 @@ static void hpre_free_req_id(struct hpre_ctx *ctx, int req_id) ...@@ -119,6 +125,7 @@ static void hpre_free_req_id(struct hpre_ctx *ctx, int req_id)
static int hpre_add_req_to_ctx(struct hpre_asym_request *hpre_req) static int hpre_add_req_to_ctx(struct hpre_asym_request *hpre_req)
{ {
struct hpre_ctx *ctx; struct hpre_ctx *ctx;
struct hpre_dfx *dfx;
int id; int id;
ctx = hpre_req->ctx; ctx = hpre_req->ctx;
...@@ -129,6 +136,10 @@ static int hpre_add_req_to_ctx(struct hpre_asym_request *hpre_req) ...@@ -129,6 +136,10 @@ static int hpre_add_req_to_ctx(struct hpre_asym_request *hpre_req)
ctx->req_list[id] = hpre_req; ctx->req_list[id] = hpre_req;
hpre_req->req_id = id; hpre_req->req_id = id;
dfx = ctx->hpre->debug.dfx;
if (atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value))
ktime_get_ts64(&hpre_req->req_time);
return id; return id;
} }
...@@ -309,12 +320,16 @@ static int hpre_alg_res_post_hf(struct hpre_ctx *ctx, struct hpre_sqe *sqe, ...@@ -309,12 +320,16 @@ static int hpre_alg_res_post_hf(struct hpre_ctx *ctx, struct hpre_sqe *sqe,
static int hpre_ctx_set(struct hpre_ctx *ctx, struct hisi_qp *qp, int qlen) static int hpre_ctx_set(struct hpre_ctx *ctx, struct hisi_qp *qp, int qlen)
{ {
struct hpre *hpre;
if (!ctx || !qp || qlen < 0) if (!ctx || !qp || qlen < 0)
return -EINVAL; return -EINVAL;
spin_lock_init(&ctx->req_lock); spin_lock_init(&ctx->req_lock);
ctx->qp = qp; ctx->qp = qp;
hpre = container_of(ctx->qp->qm, struct hpre, qm);
ctx->hpre = hpre;
ctx->req_list = kcalloc(qlen, sizeof(void *), GFP_KERNEL); ctx->req_list = kcalloc(qlen, sizeof(void *), GFP_KERNEL);
if (!ctx->req_list) if (!ctx->req_list)
return -ENOMEM; return -ENOMEM;
...@@ -337,38 +352,80 @@ static void hpre_ctx_clear(struct hpre_ctx *ctx, bool is_clear_all) ...@@ -337,38 +352,80 @@ static void hpre_ctx_clear(struct hpre_ctx *ctx, bool is_clear_all)
ctx->key_sz = 0; ctx->key_sz = 0;
} }
/*
 * Return true if @req spent longer in flight than @overtime_thrhld
 * microseconds. The start stamp (req->req_time) is taken when the
 * request is queued in hpre_add_req_to_ctx(); compare against now.
 */
static bool hpre_is_bd_timeout(struct hpre_asym_request *req,
			       u64 overtime_thrhld)
{
	struct timespec64 now;
	u64 elapsed_us;

	ktime_get_ts64(&now);
	elapsed_us = (now.tv_sec - req->req_time.tv_sec) * HPRE_DFX_SEC_TO_US;
	elapsed_us += (now.tv_nsec - req->req_time.tv_nsec) / HPRE_DFX_US_TO_NS;

	return elapsed_us > overtime_thrhld;
}
static void hpre_dh_cb(struct hpre_ctx *ctx, void *resp) static void hpre_dh_cb(struct hpre_ctx *ctx, void *resp)
{ {
struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
struct hpre_asym_request *req; struct hpre_asym_request *req;
struct kpp_request *areq; struct kpp_request *areq;
u64 overtime_thrhld;
int ret; int ret;
ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req); ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req);
areq = req->areq.dh; areq = req->areq.dh;
areq->dst_len = ctx->key_sz; areq->dst_len = ctx->key_sz;
overtime_thrhld = atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value);
if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld))
atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value);
hpre_hw_data_clr_all(ctx, req, areq->dst, areq->src); hpre_hw_data_clr_all(ctx, req, areq->dst, areq->src);
kpp_request_complete(areq, ret); kpp_request_complete(areq, ret);
atomic64_inc(&dfx[HPRE_RECV_CNT].value);
} }
static void hpre_rsa_cb(struct hpre_ctx *ctx, void *resp) static void hpre_rsa_cb(struct hpre_ctx *ctx, void *resp)
{ {
struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
struct hpre_asym_request *req; struct hpre_asym_request *req;
struct akcipher_request *areq; struct akcipher_request *areq;
u64 overtime_thrhld;
int ret; int ret;
ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req); ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req);
overtime_thrhld = atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value);
if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld))
atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value);
areq = req->areq.rsa; areq = req->areq.rsa;
areq->dst_len = ctx->key_sz; areq->dst_len = ctx->key_sz;
hpre_hw_data_clr_all(ctx, req, areq->dst, areq->src); hpre_hw_data_clr_all(ctx, req, areq->dst, areq->src);
akcipher_request_complete(areq, ret); akcipher_request_complete(areq, ret);
atomic64_inc(&dfx[HPRE_RECV_CNT].value);
} }
static void hpre_alg_cb(struct hisi_qp *qp, void *resp) static void hpre_alg_cb(struct hisi_qp *qp, void *resp)
{ {
struct hpre_ctx *ctx = qp->qp_ctx; struct hpre_ctx *ctx = qp->qp_ctx;
struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
struct hpre_sqe *sqe = resp; struct hpre_sqe *sqe = resp;
struct hpre_asym_request *req = ctx->req_list[le16_to_cpu(sqe->tag)];
ctx->req_list[le16_to_cpu(sqe->tag)]->cb(ctx, resp);
if (unlikely(!req)) {
atomic64_inc(&dfx[HPRE_INVALID_REQ_CNT].value);
return;
}
req->cb(ctx, resp);
} }
static int hpre_ctx_init(struct hpre_ctx *ctx) static int hpre_ctx_init(struct hpre_ctx *ctx)
...@@ -436,6 +493,29 @@ static int hpre_msg_request_set(struct hpre_ctx *ctx, void *req, bool is_rsa) ...@@ -436,6 +493,29 @@ static int hpre_msg_request_set(struct hpre_ctx *ctx, void *req, bool is_rsa)
return 0; return 0;
} }
/*
 * Submit one SQE to the queue pair, retrying up to HPRE_TRY_SEND_TIMES
 * additional times while the queue reports -EBUSY, updating the DFX
 * counters (send_cnt per attempt, send_busy_cnt per -EBUSY, and
 * send_fail_cnt on a terminal non-busy failure).
 *
 * Returns 0 on success, -EBUSY if the queue stayed busy through all
 * retries, or the error from hisi_qp_send() otherwise.
 */
static int hpre_send(struct hpre_ctx *ctx, struct hpre_sqe *msg)
{
	struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
	int busy_retries = 0;
	int ret;

	for (;;) {
		atomic64_inc(&dfx[HPRE_SEND_CNT].value);
		ret = hisi_qp_send(ctx->qp, msg);
		if (ret != -EBUSY)
			break;
		atomic64_inc(&dfx[HPRE_SEND_BUSY_CNT].value);
		if (busy_retries++ >= HPRE_TRY_SEND_TIMES)
			break;
	}

	if (likely(!ret))
		return 0;

	if (ret != -EBUSY)
		atomic64_inc(&dfx[HPRE_SEND_FAIL_CNT].value);

	return ret;
}
#ifdef CONFIG_CRYPTO_DH #ifdef CONFIG_CRYPTO_DH
static int hpre_dh_compute_value(struct kpp_request *req) static int hpre_dh_compute_value(struct kpp_request *req)
{ {
...@@ -444,7 +524,6 @@ static int hpre_dh_compute_value(struct kpp_request *req) ...@@ -444,7 +524,6 @@ static int hpre_dh_compute_value(struct kpp_request *req)
void *tmp = kpp_request_ctx(req); void *tmp = kpp_request_ctx(req);
struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ); struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
struct hpre_sqe *msg = &hpre_req->req; struct hpre_sqe *msg = &hpre_req->req;
int ctr = 0;
int ret; int ret;
ret = hpre_msg_request_set(ctx, req, false); ret = hpre_msg_request_set(ctx, req, false);
...@@ -465,11 +544,9 @@ static int hpre_dh_compute_value(struct kpp_request *req) ...@@ -465,11 +544,9 @@ static int hpre_dh_compute_value(struct kpp_request *req)
msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_DH_G2); msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_DH_G2);
else else
msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_DH); msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_DH);
do {
ret = hisi_qp_send(ctx->qp, msg);
} while (ret == -EBUSY && ctr++ < HPRE_TRY_SEND_TIMES);
/* success */ /* success */
ret = hpre_send(ctx, msg);
if (likely(!ret)) if (likely(!ret))
return -EINPROGRESS; return -EINPROGRESS;
...@@ -647,7 +724,6 @@ static int hpre_rsa_enc(struct akcipher_request *req) ...@@ -647,7 +724,6 @@ static int hpre_rsa_enc(struct akcipher_request *req)
void *tmp = akcipher_request_ctx(req); void *tmp = akcipher_request_ctx(req);
struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ); struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
struct hpre_sqe *msg = &hpre_req->req; struct hpre_sqe *msg = &hpre_req->req;
int ctr = 0;
int ret; int ret;
/* For 512 and 1536 bits key size, use soft tfm instead */ /* For 512 and 1536 bits key size, use soft tfm instead */
...@@ -677,11 +753,8 @@ static int hpre_rsa_enc(struct akcipher_request *req) ...@@ -677,11 +753,8 @@ static int hpre_rsa_enc(struct akcipher_request *req)
if (unlikely(ret)) if (unlikely(ret))
goto clear_all; goto clear_all;
do {
ret = hisi_qp_send(ctx->qp, msg);
} while (ret == -EBUSY && ctr++ < HPRE_TRY_SEND_TIMES);
/* success */ /* success */
ret = hpre_send(ctx, msg);
if (likely(!ret)) if (likely(!ret))
return -EINPROGRESS; return -EINPROGRESS;
...@@ -699,7 +772,6 @@ static int hpre_rsa_dec(struct akcipher_request *req) ...@@ -699,7 +772,6 @@ static int hpre_rsa_dec(struct akcipher_request *req)
void *tmp = akcipher_request_ctx(req); void *tmp = akcipher_request_ctx(req);
struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ); struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
struct hpre_sqe *msg = &hpre_req->req; struct hpre_sqe *msg = &hpre_req->req;
int ctr = 0;
int ret; int ret;
/* For 512 and 1536 bits key size, use soft tfm instead */ /* For 512 and 1536 bits key size, use soft tfm instead */
...@@ -736,11 +808,8 @@ static int hpre_rsa_dec(struct akcipher_request *req) ...@@ -736,11 +808,8 @@ static int hpre_rsa_dec(struct akcipher_request *req)
if (unlikely(ret)) if (unlikely(ret))
goto clear_all; goto clear_all;
do {
ret = hisi_qp_send(ctx->qp, msg);
} while (ret == -EBUSY && ctr++ < HPRE_TRY_SEND_TIMES);
/* success */ /* success */
ret = hpre_send(ctx, msg);
if (likely(!ret)) if (likely(!ret))
return -EINPROGRESS; return -EINPROGRESS;
......
...@@ -159,6 +159,16 @@ static const struct debugfs_reg32 hpre_com_dfx_regs[] = { ...@@ -159,6 +159,16 @@ static const struct debugfs_reg32 hpre_com_dfx_regs[] = {
{"INT_STATUS ", HPRE_INT_STATUS}, {"INT_STATUS ", HPRE_INT_STATUS},
}; };
/* debugfs file names; index order must match enum hpre_dfx_dbgfs_file */
static const char *hpre_dfx_files[HPRE_DFX_FILE_NUM] = {
	"send_cnt",
	"recv_cnt",
	"send_fail_cnt",
	"send_busy_cnt",
	"over_thrhld_cnt",
	"overtime_thrhld",
	"invalid_req_cnt"
};
static int pf_q_num_set(const char *val, const struct kernel_param *kp) static int pf_q_num_set(const char *val, const struct kernel_param *kp)
{ {
return q_num_set(val, kp, HPRE_PCI_DEVICE_ID); return q_num_set(val, kp, HPRE_PCI_DEVICE_ID);
...@@ -524,6 +534,33 @@ static const struct file_operations hpre_ctrl_debug_fops = { ...@@ -524,6 +534,33 @@ static const struct file_operations hpre_ctrl_debug_fops = {
.write = hpre_ctrl_debug_write, .write = hpre_ctrl_debug_write,
}; };
/* debugfs read hook: report the DFX item's current 64-bit value. */
static int hpre_debugfs_atomic64_get(void *data, u64 *val)
{
	struct hpre_dfx *dfx_item = data;

	*val = atomic64_read(&dfx_item->value);

	return 0;
}
/*
 * debugfs write hook for the hpre_dfx files.
 *
 * overtime_thrhld is a real control: any value (microseconds) may be
 * written, and writing it also clears over_thrhld_cnt so the new
 * threshold starts counting from zero. Every other file is a counter
 * and only accepts 0 ("clear"); nonzero writes return -EINVAL.
 *
 * Note: the original code rejected ALL nonzero writes before checking
 * the type, which made overtime_thrhld permanently stuck at 0 (the
 * feature could never be enabled), contradicting the documented ABI
 * ("0: disable(default), 1: 1 microsecond"). It also computed
 * dfx_item - HPRE_OVERTIME_THRHLD unconditionally, which is an
 * out-of-range pointer for every item with a smaller index; the
 * arithmetic is now done only where it is known to be valid.
 */
static int hpre_debugfs_atomic64_set(void *data, u64 val)
{
	struct hpre_dfx *dfx_item = data;
	struct hpre_dfx *hpre_dfx;

	if (dfx_item->type == HPRE_OVERTIME_THRHLD) {
		/* dfx_item is &dfx[HPRE_OVERTIME_THRHLD]; recover array base */
		hpre_dfx = dfx_item - HPRE_OVERTIME_THRHLD;
		atomic64_set(&hpre_dfx[HPRE_OVER_THRHLD_CNT].value, 0);
	} else if (val) {
		return -EINVAL;
	}

	atomic64_set(&dfx_item->value, val);

	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(hpre_atomic64_ops, hpre_debugfs_atomic64_get,
hpre_debugfs_atomic64_set, "%llu\n");
static int hpre_create_debugfs_file(struct hpre_debug *dbg, struct dentry *dir, static int hpre_create_debugfs_file(struct hpre_debug *dbg, struct dentry *dir,
enum hpre_ctrl_dbgfs_file type, int indx) enum hpre_ctrl_dbgfs_file type, int indx)
{ {
...@@ -621,6 +658,22 @@ static int hpre_ctrl_debug_init(struct hpre_debug *debug) ...@@ -621,6 +658,22 @@ static int hpre_ctrl_debug_init(struct hpre_debug *debug)
return hpre_cluster_debugfs_init(debug); return hpre_cluster_debugfs_init(debug);
} }
/*
 * Create the "hpre_dfx" debugfs directory under the QM debug root and
 * one file per DFX item, each backed by hpre_atomic64_ops.
 */
static void hpre_dfx_debug_init(struct hpre_debug *debug)
{
	struct hpre *hpre = container_of(debug, struct hpre, debug);
	struct hpre_dfx *dfx = hpre->debug.dfx;
	struct hisi_qm *qm = &hpre->qm;
	struct dentry *dfx_dir;
	int idx;

	dfx_dir = debugfs_create_dir("hpre_dfx", qm->debug.debug_root);

	for (idx = 0; idx < HPRE_DFX_FILE_NUM; idx++) {
		dfx[idx].type = idx;
		debugfs_create_file(hpre_dfx_files[idx], 0644, dfx_dir,
				    &dfx[idx], &hpre_atomic64_ops);
	}
}
static int hpre_debugfs_init(struct hpre *hpre) static int hpre_debugfs_init(struct hpre *hpre)
{ {
struct hisi_qm *qm = &hpre->qm; struct hisi_qm *qm = &hpre->qm;
...@@ -641,6 +694,9 @@ static int hpre_debugfs_init(struct hpre *hpre) ...@@ -641,6 +694,9 @@ static int hpre_debugfs_init(struct hpre *hpre)
if (ret) if (ret)
goto failed_to_create; goto failed_to_create;
} }
hpre_dfx_debug_init(&hpre->debug);
return 0; return 0;
failed_to_create: failed_to_create:
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment