Commit 9dcd71c8 authored by Corentin Labbe's avatar Corentin Labbe Committed by Herbert Xu

crypto: rockchip - Add support for RK3399

The RK3399 has two rk3288-compatible crypto devices, named crypto0 and
crypto1; the only difference between them is that crypto1 lacks RSA.

We need to add driver support for two parallel instances, while only one
of them needs to register the crypto algorithms.
The driver then round-robins each request across the devices.

To avoid the complexity of bringing up a device after a TFM has been
created, PM is changed to be handled per request.
Signed-off-by: default avatarCorentin Labbe <clabbe@baylibre.com>
Signed-off-by: default avatarHerbert Xu <herbert@gondor.apana.org.au>
parent c5a1e104
...@@ -19,6 +19,23 @@ ...@@ -19,6 +19,23 @@
#include <linux/crypto.h> #include <linux/crypto.h>
#include <linux/reset.h> #include <linux/reset.h>
/*
 * Global list of all probed Rockchip crypto instances; requests are
 * round-robined across it by get_rk_crypto().  Statically initialized
 * so it is valid before the first probe runs.
 */
static struct rockchip_ip rocklist = {
.dev_list = LIST_HEAD_INIT(rocklist.dev_list),
.lock = __SPIN_LOCK_UNLOCKED(rocklist.lock),
};
/*
 * get_rk_crypto - pick a crypto instance in round-robin order
 *
 * Returns the head of the global device list (or NULL if no device has
 * been probed yet), then rotates the list so the next caller gets the
 * following instance.  The list lock covers both steps so concurrent
 * callers see a consistent rotation.
 */
struct rk_crypto_info *get_rk_crypto(void)
{
	struct rk_crypto_info *rkc;

	spin_lock(&rocklist.lock);
	rkc = list_first_entry_or_null(&rocklist.dev_list,
				       struct rk_crypto_info, list);
	list_rotate_left(&rocklist.dev_list);
	spin_unlock(&rocklist.lock);

	return rkc;
}
static const struct rk_variant rk3288_variant = { static const struct rk_variant rk3288_variant = {
.num_clks = 4, .num_clks = 4,
.rkclks = { .rkclks = {
...@@ -30,6 +47,10 @@ static const struct rk_variant rk3328_variant = { ...@@ -30,6 +47,10 @@ static const struct rk_variant rk3328_variant = {
.num_clks = 3, .num_clks = 3,
}; };
/* RK3399: per-instance crypto block clocked by 3 clocks (see rkclks default). */
static const struct rk_variant rk3399_variant = {
.num_clks = 3,
};
static int rk_crypto_get_clks(struct rk_crypto_info *dev) static int rk_crypto_get_clks(struct rk_crypto_info *dev)
{ {
int i, j, err; int i, j, err;
...@@ -83,8 +104,8 @@ static void rk_crypto_disable_clk(struct rk_crypto_info *dev) ...@@ -83,8 +104,8 @@ static void rk_crypto_disable_clk(struct rk_crypto_info *dev)
} }
/* /*
* Power management strategy: The device is suspended unless a TFM exists for * Power management strategy: The device is suspended until a request
* one of the algorithms proposed by this driver. * is handled. For avoiding suspend/resume yoyo, the autosuspend is set to 2s.
*/ */
static int rk_crypto_pm_suspend(struct device *dev) static int rk_crypto_pm_suspend(struct device *dev)
{ {
...@@ -166,8 +187,17 @@ static struct rk_crypto_tmp *rk_cipher_algs[] = { ...@@ -166,8 +187,17 @@ static struct rk_crypto_tmp *rk_cipher_algs[] = {
#ifdef CONFIG_CRYPTO_DEV_ROCKCHIP_DEBUG #ifdef CONFIG_CRYPTO_DEV_ROCKCHIP_DEBUG
static int rk_crypto_debugfs_show(struct seq_file *seq, void *v) static int rk_crypto_debugfs_show(struct seq_file *seq, void *v)
{ {
struct rk_crypto_info *dd;
unsigned int i; unsigned int i;
spin_lock(&rocklist.lock);
list_for_each_entry(dd, &rocklist.dev_list, list) {
seq_printf(seq, "%s %s requests: %lu\n",
dev_driver_string(dd->dev), dev_name(dd->dev),
dd->nreq);
}
spin_unlock(&rocklist.lock);
for (i = 0; i < ARRAY_SIZE(rk_cipher_algs); i++) { for (i = 0; i < ARRAY_SIZE(rk_cipher_algs); i++) {
if (!rk_cipher_algs[i]->dev) if (!rk_cipher_algs[i]->dev)
continue; continue;
...@@ -198,6 +228,18 @@ static int rk_crypto_debugfs_show(struct seq_file *seq, void *v) ...@@ -198,6 +228,18 @@ static int rk_crypto_debugfs_show(struct seq_file *seq, void *v)
DEFINE_SHOW_ATTRIBUTE(rk_crypto_debugfs); DEFINE_SHOW_ATTRIBUTE(rk_crypto_debugfs);
#endif #endif
/*
 * register_debugfs - create the shared debugfs statistics entries
 * @crypto_info: instance triggering the registration (currently unused;
 *               the entries are global and attached to rocklist)
 *
 * Only compiled in with CONFIG_CRYPTO_DEV_ROCKCHIP_DEBUG; errors from
 * debugfs are deliberately ignored, as debugfs is best-effort.
 * NOTE(review): caller appears to invoke this only for the first probed
 * instance, so the directory is created once — confirm against probe path.
 */
static void register_debugfs(struct rk_crypto_info *crypto_info)
{
#ifdef CONFIG_CRYPTO_DEV_ROCKCHIP_DEBUG
/* Ignore error of debugfs */
rocklist.dbgfs_dir = debugfs_create_dir("rk3288_crypto", NULL);
rocklist.dbgfs_stats = debugfs_create_file("stats", 0444,
rocklist.dbgfs_dir,
&rocklist,
&rk_crypto_debugfs_fops);
#endif
}
static int rk_crypto_register(struct rk_crypto_info *crypto_info) static int rk_crypto_register(struct rk_crypto_info *crypto_info)
{ {
unsigned int i, k; unsigned int i, k;
...@@ -255,6 +297,9 @@ static const struct of_device_id crypto_of_id_table[] = { ...@@ -255,6 +297,9 @@ static const struct of_device_id crypto_of_id_table[] = {
{ .compatible = "rockchip,rk3328-crypto", { .compatible = "rockchip,rk3328-crypto",
.data = &rk3328_variant, .data = &rk3328_variant,
}, },
{ .compatible = "rockchip,rk3399-crypto",
.data = &rk3399_variant,
},
{} {}
}; };
MODULE_DEVICE_TABLE(of, crypto_of_id_table); MODULE_DEVICE_TABLE(of, crypto_of_id_table);
...@@ -262,7 +307,7 @@ MODULE_DEVICE_TABLE(of, crypto_of_id_table); ...@@ -262,7 +307,7 @@ MODULE_DEVICE_TABLE(of, crypto_of_id_table);
static int rk_crypto_probe(struct platform_device *pdev) static int rk_crypto_probe(struct platform_device *pdev)
{ {
struct device *dev = &pdev->dev; struct device *dev = &pdev->dev;
struct rk_crypto_info *crypto_info; struct rk_crypto_info *crypto_info, *first;
int err = 0; int err = 0;
crypto_info = devm_kzalloc(&pdev->dev, crypto_info = devm_kzalloc(&pdev->dev,
...@@ -325,22 +370,22 @@ static int rk_crypto_probe(struct platform_device *pdev) ...@@ -325,22 +370,22 @@ static int rk_crypto_probe(struct platform_device *pdev)
if (err) if (err)
goto err_pm; goto err_pm;
spin_lock(&rocklist.lock);
first = list_first_entry_or_null(&rocklist.dev_list,
struct rk_crypto_info, list);
list_add_tail(&crypto_info->list, &rocklist.dev_list);
spin_unlock(&rocklist.lock);
if (!first) {
err = rk_crypto_register(crypto_info); err = rk_crypto_register(crypto_info);
if (err) { if (err) {
dev_err(dev, "err in register alg"); dev_err(dev, "Fail to register crypto algorithms");
goto err_register_alg; goto err_register_alg;
} }
#ifdef CONFIG_CRYPTO_DEV_ROCKCHIP_DEBUG register_debugfs(crypto_info);
/* Ignore error of debugfs */ }
crypto_info->dbgfs_dir = debugfs_create_dir("rk3288_crypto", NULL);
crypto_info->dbgfs_stats = debugfs_create_file("stats", 0444,
crypto_info->dbgfs_dir,
crypto_info,
&rk_crypto_debugfs_fops);
#endif
dev_info(dev, "Crypto Accelerator successfully registered\n");
return 0; return 0;
err_register_alg: err_register_alg:
...@@ -355,11 +400,20 @@ static int rk_crypto_probe(struct platform_device *pdev) ...@@ -355,11 +400,20 @@ static int rk_crypto_probe(struct platform_device *pdev)
static int rk_crypto_remove(struct platform_device *pdev) static int rk_crypto_remove(struct platform_device *pdev)
{ {
struct rk_crypto_info *crypto_tmp = platform_get_drvdata(pdev); struct rk_crypto_info *crypto_tmp = platform_get_drvdata(pdev);
struct rk_crypto_info *first;
spin_lock_bh(&rocklist.lock);
list_del(&crypto_tmp->list);
first = list_first_entry_or_null(&rocklist.dev_list,
struct rk_crypto_info, list);
spin_unlock_bh(&rocklist.lock);
if (!first) {
#ifdef CONFIG_CRYPTO_DEV_ROCKCHIP_DEBUG #ifdef CONFIG_CRYPTO_DEV_ROCKCHIP_DEBUG
debugfs_remove_recursive(crypto_tmp->dbgfs_dir); debugfs_remove_recursive(rocklist.dbgfs_dir);
#endif #endif
rk_crypto_unregister(); rk_crypto_unregister();
}
rk_crypto_pm_exit(crypto_tmp); rk_crypto_pm_exit(crypto_tmp);
crypto_engine_exit(crypto_tmp->engine); crypto_engine_exit(crypto_tmp->engine);
return 0; return 0;
......
...@@ -190,6 +190,20 @@ ...@@ -190,6 +190,20 @@
#define RK_MAX_CLKS 4 #define RK_MAX_CLKS 4
/*
 * struct rockchip_ip - struct for managing a list of RK crypto instances
 * @dev_list:	Used for doing a list of rk_crypto_info
 * @lock:	Control access to dev_list
 * @dbgfs_dir:	Debugfs dentry for statistic directory
 * @dbgfs_stats: Debugfs dentry for statistic counters
 *
 * There is one global instance of this structure; all probed devices
 * register themselves on @dev_list and requests are distributed over it.
 */
struct rockchip_ip {
struct list_head dev_list;
spinlock_t lock; /* protects dev_list; see kernel-doc above */
struct dentry *dbgfs_dir;
struct dentry *dbgfs_stats;
};
struct rk_clks { struct rk_clks {
const char *name; const char *name;
unsigned long max; unsigned long max;
...@@ -201,6 +215,7 @@ struct rk_variant { ...@@ -201,6 +215,7 @@ struct rk_variant {
}; };
struct rk_crypto_info { struct rk_crypto_info {
struct list_head list;
struct device *dev; struct device *dev;
struct clk_bulk_data *clks; struct clk_bulk_data *clks;
int num_clks; int num_clks;
...@@ -208,19 +223,15 @@ struct rk_crypto_info { ...@@ -208,19 +223,15 @@ struct rk_crypto_info {
void __iomem *reg; void __iomem *reg;
int irq; int irq;
const struct rk_variant *variant; const struct rk_variant *variant;
unsigned long nreq;
struct crypto_engine *engine; struct crypto_engine *engine;
struct completion complete; struct completion complete;
int status; int status;
#ifdef CONFIG_CRYPTO_DEV_ROCKCHIP_DEBUG
struct dentry *dbgfs_dir;
struct dentry *dbgfs_stats;
#endif
}; };
/* the private variable of hash */ /* the private variable of hash */
struct rk_ahash_ctx { struct rk_ahash_ctx {
struct crypto_engine_ctx enginectx; struct crypto_engine_ctx enginectx;
struct rk_crypto_info *dev;
/* for fallback */ /* for fallback */
struct crypto_ahash *fallback_tfm; struct crypto_ahash *fallback_tfm;
}; };
...@@ -236,7 +247,6 @@ struct rk_ahash_rctx { ...@@ -236,7 +247,6 @@ struct rk_ahash_rctx {
/* the private variable of cipher */ /* the private variable of cipher */
struct rk_cipher_ctx { struct rk_cipher_ctx {
struct crypto_engine_ctx enginectx; struct crypto_engine_ctx enginectx;
struct rk_crypto_info *dev;
unsigned int keylen; unsigned int keylen;
u8 key[AES_MAX_KEY_SIZE]; u8 key[AES_MAX_KEY_SIZE];
u8 iv[AES_BLOCK_SIZE]; u8 iv[AES_BLOCK_SIZE];
...@@ -276,4 +286,5 @@ extern struct rk_crypto_tmp rk_ahash_sha1; ...@@ -276,4 +286,5 @@ extern struct rk_crypto_tmp rk_ahash_sha1;
extern struct rk_crypto_tmp rk_ahash_sha256; extern struct rk_crypto_tmp rk_ahash_sha256;
extern struct rk_crypto_tmp rk_ahash_md5; extern struct rk_crypto_tmp rk_ahash_md5;
struct rk_crypto_info *get_rk_crypto(void);
#endif #endif
...@@ -199,8 +199,8 @@ static int rk_ahash_export(struct ahash_request *req, void *out) ...@@ -199,8 +199,8 @@ static int rk_ahash_export(struct ahash_request *req, void *out)
static int rk_ahash_digest(struct ahash_request *req) static int rk_ahash_digest(struct ahash_request *req)
{ {
struct rk_ahash_rctx *rctx = ahash_request_ctx(req); struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
struct rk_ahash_ctx *tctx = crypto_tfm_ctx(req->base.tfm); struct rk_crypto_info *dev;
struct rk_crypto_info *dev = tctx->dev; struct crypto_engine *engine;
if (rk_ahash_need_fallback(req)) if (rk_ahash_need_fallback(req))
return rk_ahash_digest_fb(req); return rk_ahash_digest_fb(req);
...@@ -208,9 +208,12 @@ static int rk_ahash_digest(struct ahash_request *req) ...@@ -208,9 +208,12 @@ static int rk_ahash_digest(struct ahash_request *req)
if (!req->nbytes) if (!req->nbytes)
return zero_message_process(req); return zero_message_process(req);
dev = get_rk_crypto();
rctx->dev = dev; rctx->dev = dev;
engine = dev->engine;
return crypto_transfer_hash_request_to_engine(dev->engine, req); return crypto_transfer_hash_request_to_engine(engine, req);
} }
static void crypto_ahash_dma_start(struct rk_crypto_info *dev, struct scatterlist *sg) static void crypto_ahash_dma_start(struct rk_crypto_info *dev, struct scatterlist *sg)
...@@ -260,9 +263,14 @@ static int rk_hash_run(struct crypto_engine *engine, void *breq) ...@@ -260,9 +263,14 @@ static int rk_hash_run(struct crypto_engine *engine, void *breq)
int i; int i;
u32 v; u32 v;
err = pm_runtime_resume_and_get(rkc->dev);
if (err)
return err;
rctx->mode = 0; rctx->mode = 0;
algt->stat_req++; algt->stat_req++;
rkc->nreq++;
switch (crypto_ahash_digestsize(tfm)) { switch (crypto_ahash_digestsize(tfm)) {
case SHA1_DIGEST_SIZE: case SHA1_DIGEST_SIZE:
...@@ -313,6 +321,8 @@ static int rk_hash_run(struct crypto_engine *engine, void *breq) ...@@ -313,6 +321,8 @@ static int rk_hash_run(struct crypto_engine *engine, void *breq)
} }
theend: theend:
pm_runtime_put_autosuspend(rkc->dev);
local_bh_disable(); local_bh_disable();
crypto_finalize_hash_request(engine, breq, err); crypto_finalize_hash_request(engine, breq, err);
local_bh_enable(); local_bh_enable();
...@@ -323,21 +333,15 @@ static int rk_hash_run(struct crypto_engine *engine, void *breq) ...@@ -323,21 +333,15 @@ static int rk_hash_run(struct crypto_engine *engine, void *breq)
static int rk_cra_hash_init(struct crypto_tfm *tfm) static int rk_cra_hash_init(struct crypto_tfm *tfm)
{ {
struct rk_ahash_ctx *tctx = crypto_tfm_ctx(tfm); struct rk_ahash_ctx *tctx = crypto_tfm_ctx(tfm);
struct rk_crypto_tmp *algt;
struct ahash_alg *alg = __crypto_ahash_alg(tfm->__crt_alg);
const char *alg_name = crypto_tfm_alg_name(tfm); const char *alg_name = crypto_tfm_alg_name(tfm);
int err; struct ahash_alg *alg = __crypto_ahash_alg(tfm->__crt_alg);
struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.hash);
algt = container_of(alg, struct rk_crypto_tmp, alg.hash);
tctx->dev = algt->dev;
/* for fallback */ /* for fallback */
tctx->fallback_tfm = crypto_alloc_ahash(alg_name, 0, tctx->fallback_tfm = crypto_alloc_ahash(alg_name, 0,
CRYPTO_ALG_NEED_FALLBACK); CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(tctx->fallback_tfm)) { if (IS_ERR(tctx->fallback_tfm)) {
dev_err(tctx->dev->dev, "Could not load fallback driver.\n"); dev_err(algt->dev->dev, "Could not load fallback driver.\n");
return PTR_ERR(tctx->fallback_tfm); return PTR_ERR(tctx->fallback_tfm);
} }
...@@ -349,15 +353,7 @@ static int rk_cra_hash_init(struct crypto_tfm *tfm) ...@@ -349,15 +353,7 @@ static int rk_cra_hash_init(struct crypto_tfm *tfm)
tctx->enginectx.op.prepare_request = rk_hash_prepare; tctx->enginectx.op.prepare_request = rk_hash_prepare;
tctx->enginectx.op.unprepare_request = rk_hash_unprepare; tctx->enginectx.op.unprepare_request = rk_hash_unprepare;
err = pm_runtime_resume_and_get(tctx->dev->dev);
if (err < 0)
goto error_pm;
return 0; return 0;
error_pm:
crypto_free_ahash(tctx->fallback_tfm);
return err;
} }
static void rk_cra_hash_exit(struct crypto_tfm *tfm) static void rk_cra_hash_exit(struct crypto_tfm *tfm)
...@@ -365,7 +361,6 @@ static void rk_cra_hash_exit(struct crypto_tfm *tfm) ...@@ -365,7 +361,6 @@ static void rk_cra_hash_exit(struct crypto_tfm *tfm)
struct rk_ahash_ctx *tctx = crypto_tfm_ctx(tfm); struct rk_ahash_ctx *tctx = crypto_tfm_ctx(tfm);
crypto_free_ahash(tctx->fallback_tfm); crypto_free_ahash(tctx->fallback_tfm);
pm_runtime_put_autosuspend(tctx->dev->dev);
} }
struct rk_crypto_tmp rk_ahash_sha1 = { struct rk_crypto_tmp rk_ahash_sha1 = {
......
...@@ -17,11 +17,11 @@ ...@@ -17,11 +17,11 @@
static int rk_cipher_need_fallback(struct skcipher_request *req) static int rk_cipher_need_fallback(struct skcipher_request *req)
{ {
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
unsigned int bs = crypto_skcipher_blocksize(tfm);
struct skcipher_alg *alg = crypto_skcipher_alg(tfm); struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.skcipher); struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.skcipher);
struct scatterlist *sgs, *sgd; struct scatterlist *sgs, *sgd;
unsigned int stodo, dtodo, len; unsigned int stodo, dtodo, len;
unsigned int bs = crypto_skcipher_blocksize(tfm);
if (!req->cryptlen) if (!req->cryptlen)
return true; return true;
...@@ -84,15 +84,16 @@ static int rk_cipher_fallback(struct skcipher_request *areq) ...@@ -84,15 +84,16 @@ static int rk_cipher_fallback(struct skcipher_request *areq)
static int rk_cipher_handle_req(struct skcipher_request *req) static int rk_cipher_handle_req(struct skcipher_request *req)
{ {
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct rk_cipher_ctx *tctx = crypto_skcipher_ctx(tfm);
struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);
struct rk_crypto_info *rkc = tctx->dev; struct rk_crypto_info *rkc;
struct crypto_engine *engine = rkc->engine; struct crypto_engine *engine;
if (rk_cipher_need_fallback(req)) if (rk_cipher_need_fallback(req))
return rk_cipher_fallback(req); return rk_cipher_fallback(req);
rkc = get_rk_crypto();
engine = rkc->engine;
rctx->dev = rkc; rctx->dev = rkc;
return crypto_transfer_skcipher_request_to_engine(engine, req); return crypto_transfer_skcipher_request_to_engine(engine, req);
...@@ -307,7 +308,12 @@ static int rk_cipher_run(struct crypto_engine *engine, void *async_req) ...@@ -307,7 +308,12 @@ static int rk_cipher_run(struct crypto_engine *engine, void *async_req)
struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.skcipher); struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.skcipher);
struct rk_crypto_info *rkc = rctx->dev; struct rk_crypto_info *rkc = rctx->dev;
err = pm_runtime_resume_and_get(rkc->dev);
if (err)
return err;
algt->stat_req++; algt->stat_req++;
rkc->nreq++;
ivsize = crypto_skcipher_ivsize(tfm); ivsize = crypto_skcipher_ivsize(tfm);
if (areq->iv && crypto_skcipher_ivsize(tfm) > 0) { if (areq->iv && crypto_skcipher_ivsize(tfm) > 0) {
...@@ -401,6 +407,8 @@ static int rk_cipher_run(struct crypto_engine *engine, void *async_req) ...@@ -401,6 +407,8 @@ static int rk_cipher_run(struct crypto_engine *engine, void *async_req)
} }
theend: theend:
pm_runtime_put_autosuspend(rkc->dev);
local_bh_disable(); local_bh_disable();
crypto_finalize_skcipher_request(engine, areq, err); crypto_finalize_skcipher_request(engine, areq, err);
local_bh_enable(); local_bh_enable();
...@@ -420,18 +428,13 @@ static int rk_cipher_run(struct crypto_engine *engine, void *async_req) ...@@ -420,18 +428,13 @@ static int rk_cipher_run(struct crypto_engine *engine, void *async_req)
static int rk_cipher_tfm_init(struct crypto_skcipher *tfm) static int rk_cipher_tfm_init(struct crypto_skcipher *tfm)
{ {
struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
const char *name = crypto_tfm_alg_name(&tfm->base); const char *name = crypto_tfm_alg_name(&tfm->base);
struct rk_crypto_tmp *algt; struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
int err; struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.skcipher);
algt = container_of(alg, struct rk_crypto_tmp, alg.skcipher);
ctx->dev = algt->dev;
ctx->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK); ctx->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(ctx->fallback_tfm)) { if (IS_ERR(ctx->fallback_tfm)) {
dev_err(ctx->dev->dev, "ERROR: Cannot allocate fallback for %s %ld\n", dev_err(algt->dev->dev, "ERROR: Cannot allocate fallback for %s %ld\n",
name, PTR_ERR(ctx->fallback_tfm)); name, PTR_ERR(ctx->fallback_tfm));
return PTR_ERR(ctx->fallback_tfm); return PTR_ERR(ctx->fallback_tfm);
} }
...@@ -441,14 +444,7 @@ static int rk_cipher_tfm_init(struct crypto_skcipher *tfm) ...@@ -441,14 +444,7 @@ static int rk_cipher_tfm_init(struct crypto_skcipher *tfm)
ctx->enginectx.op.do_one_request = rk_cipher_run; ctx->enginectx.op.do_one_request = rk_cipher_run;
err = pm_runtime_resume_and_get(ctx->dev->dev);
if (err < 0)
goto error_pm;
return 0; return 0;
error_pm:
crypto_free_skcipher(ctx->fallback_tfm);
return err;
} }
static void rk_cipher_tfm_exit(struct crypto_skcipher *tfm) static void rk_cipher_tfm_exit(struct crypto_skcipher *tfm)
...@@ -457,7 +453,6 @@ static void rk_cipher_tfm_exit(struct crypto_skcipher *tfm) ...@@ -457,7 +453,6 @@ static void rk_cipher_tfm_exit(struct crypto_skcipher *tfm)
memzero_explicit(ctx->key, ctx->keylen); memzero_explicit(ctx->key, ctx->keylen);
crypto_free_skcipher(ctx->fallback_tfm); crypto_free_skcipher(ctx->fallback_tfm);
pm_runtime_put_autosuspend(ctx->dev->dev);
} }
struct rk_crypto_tmp rk_ecb_aes_alg = { struct rk_crypto_tmp rk_ecb_aes_alg = {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment