Commit ccbf7298 authored by Cyrille Pitchen, committed by Herbert Xu

crypto: atmel-aes - make crypto request queue management more generic

This patch changes atmel_aes_handle_queue() to make it more generic.
The function argument is now a pointer to struct crypto_async_request,
which is the common base of struct ablkcipher_request and
struct aead_request.
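
Both request structures embed a struct crypto_async_request as their base member, which is what lets a single crypto_queue hold either kind and lets the driver recover the outer request with container_of(). For reference, a trimmed sketch of the relationship (field lists abridged to what matters here):

    /* from <linux/crypto.h>, abridged */
    struct crypto_async_request {
    	struct list_head	list;
    	crypto_completion_t	complete;
    	void			*data;
    	struct crypto_tfm	*tfm;
    	u32			flags;
    };

    struct ablkcipher_request {
    	struct crypto_async_request	base;	/* common base */
    	/* ... block-cipher-specific fields ... */
    };

    struct aead_request {
    	struct crypto_async_request	base;	/* same embedding */
    	/* ... AEAD-specific fields ... */
    };

    /* recover the outer request from a queued base pointer */
    static inline struct ablkcipher_request *ablkcipher_request_cast(
    	struct crypto_async_request *req)
    {
    	return container_of(req, struct ablkcipher_request, base);
    }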

This patch also introduces struct atmel_aes_base_ctx, which will be the
common base of all the transformation contexts.
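
Because each concrete context embeds the base as its first member,
crypto_tfm_ctx() on any of the driver's transforms can be read as a
struct atmel_aes_base_ctx before the request type is even known. A sketch
of the layout this patch sets up (the AEAD context below is illustrative
only; the real gcm context arrives in a follow-up patch):

    struct atmel_aes_base_ctx {
    	struct atmel_aes_dev	*dd;
    	atmel_aes_fn_t		start;	/* per-algorithm entry point */
    	int			keylen;
    	u32			key[AES_KEYSIZE_256 / sizeof(u32)];
    	u16			block_size;
    };

    /* plain block cipher: nothing beyond the base for now */
    struct atmel_aes_ctx {
    	struct atmel_aes_base_ctx	base;
    };

    /* illustrative AEAD context along the same lines (added by a later patch) */
    struct atmel_aes_gcm_ctx {
    	struct atmel_aes_base_ctx	base;
    	/* ... GCM-specific state ... */
    };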

Hence the very same queue will be used to manage both block cipher and
AEAD requests (such as gcm and authenc, implemented in further patches).
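
With the start callback in place, atmel_aes_handle_queue() no longer needs
to know which kind of request it dequeued: it looks up the base context and
dispatches through ctx->start, as the hunks below show. A condensed view of
the new flow:

    /* queue side: any crypto_async_request can be handled */
    areq = crypto_dequeue_request(&dd->queue);
    ctx = crypto_tfm_ctx(areq->tfm);	/* valid for every ctx embedding the base */

    dd->areq = areq;
    dd->ctx = ctx;

    err = ctx->start(dd);	/* atmel_aes_start() here; AEAD handlers later */
    return (areq != new_areq) ? ret : err;

    /* algorithm side: each cra_init wires up its own entry point */
    static int atmel_aes_cra_init(struct crypto_tfm *tfm)
    {
    	struct atmel_aes_ctx *ctx = crypto_tfm_ctx(tfm);

    	tfm->crt_ablkcipher.reqsize = sizeof(struct atmel_aes_reqctx);
    	ctx->base.start = atmel_aes_start;

    	return 0;
    }

The final return preserves the old calling convention: if the dequeued
request is not the one just enqueued, the caller still gets the enqueue
status; otherwise it gets the start handler's return value (typically
-EINPROGRESS).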
Signed-off-by: Cyrille Pitchen <cyrille.pitchen@atmel.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent cdfab4a7
--- a/drivers/crypto/atmel-aes.c
+++ b/drivers/crypto/atmel-aes.c
@@ -78,8 +78,13 @@ struct atmel_aes_caps {
 
 struct atmel_aes_dev;
 
-struct atmel_aes_ctx {
+typedef int (*atmel_aes_fn_t)(struct atmel_aes_dev *);
+
+struct atmel_aes_base_ctx {
 	struct atmel_aes_dev	*dd;
+	atmel_aes_fn_t		start;
 
 	int		keylen;
 	u32		key[AES_KEYSIZE_256 / sizeof(u32)];
@@ -87,6 +92,10 @@ struct atmel_aes_ctx {
 	u16		block_size;
 };
 
+struct atmel_aes_ctx {
+	struct atmel_aes_base_ctx	base;
+};
+
 struct atmel_aes_reqctx {
 	unsigned long	mode;
 };
@@ -101,7 +110,9 @@ struct atmel_aes_dev {
 	unsigned long		phys_base;
 	void __iomem		*io_base;
 
-	struct atmel_aes_ctx	*ctx;
+	struct crypto_async_request	*areq;
+	struct atmel_aes_base_ctx	*ctx;
+
 	struct device		*dev;
 	struct clk		*iclk;
 	int			irq;
@@ -115,7 +126,6 @@ struct atmel_aes_dev {
 	struct tasklet_struct	done_task;
 	struct tasklet_struct	queue_task;
 
-	struct ablkcipher_request	*req;
 	size_t			total;
 
 	struct scatterlist	*in_sg;
@@ -236,7 +246,7 @@ static void atmel_aes_write_n(struct atmel_aes_dev *dd, u32 offset,
 		atmel_aes_write(dd, offset, *value);
 }
 
-static struct atmel_aes_dev *atmel_aes_find_dev(struct atmel_aes_ctx *ctx)
+static struct atmel_aes_dev *atmel_aes_find_dev(struct atmel_aes_base_ctx *ctx)
 {
 	struct atmel_aes_dev *aes_dd = NULL;
 	struct atmel_aes_dev *tmp;
@@ -298,7 +308,7 @@ static int atmel_aes_hw_version_init(struct atmel_aes_dev *dd)
 
 static void atmel_aes_finish_req(struct atmel_aes_dev *dd, int err)
 {
-	struct ablkcipher_request *req = dd->req;
+	struct ablkcipher_request *req = ablkcipher_request_cast(dd->areq);
 
 	clk_disable_unprepare(dd->iclk);
 	dd->flags &= ~AES_FLAGS_BUSY;
@@ -396,6 +406,8 @@ static int atmel_aes_crypt_dma(struct atmel_aes_dev *dd,
 
 static int atmel_aes_crypt_cpu_start(struct atmel_aes_dev *dd)
 {
+	struct ablkcipher_request *req = ablkcipher_request_cast(dd->areq);
+
 	dd->flags &= ~AES_FLAGS_DMA;
 
 	dma_sync_single_for_cpu(dd->dev, dd->dma_addr_in,
@@ -404,11 +416,11 @@ static int atmel_aes_crypt_cpu_start(struct atmel_aes_dev *dd)
 			dd->dma_size, DMA_FROM_DEVICE);
 
 	/* use cache buffers */
-	dd->nb_in_sg = atmel_aes_sg_length(dd->req, dd->in_sg);
+	dd->nb_in_sg = atmel_aes_sg_length(req, dd->in_sg);
 	if (!dd->nb_in_sg)
 		return -EINVAL;
 
-	dd->nb_out_sg = atmel_aes_sg_length(dd->req, dd->out_sg);
+	dd->nb_out_sg = atmel_aes_sg_length(req, dd->out_sg);
 	if (!dd->nb_out_sg)
 		return -EINVAL;
 
@@ -556,38 +568,49 @@ static void atmel_aes_write_ctrl(struct atmel_aes_dev *dd, bool use_dma,
 }
 
 static int atmel_aes_handle_queue(struct atmel_aes_dev *dd,
-				  struct ablkcipher_request *req)
+				  struct crypto_async_request *new_areq)
 {
-	struct crypto_async_request *async_req, *backlog;
-	struct atmel_aes_ctx *ctx;
-	struct atmel_aes_reqctx *rctx;
+	struct crypto_async_request *areq, *backlog;
+	struct atmel_aes_base_ctx *ctx;
 	unsigned long flags;
 	int err, ret = 0;
-	bool use_dma;
 
 	spin_lock_irqsave(&dd->lock, flags);
-	if (req)
-		ret = ablkcipher_enqueue_request(&dd->queue, req);
+	if (new_areq)
+		ret = crypto_enqueue_request(&dd->queue, new_areq);
 	if (dd->flags & AES_FLAGS_BUSY) {
 		spin_unlock_irqrestore(&dd->lock, flags);
 		return ret;
 	}
 	backlog = crypto_get_backlog(&dd->queue);
-	async_req = crypto_dequeue_request(&dd->queue);
-	if (async_req)
+	areq = crypto_dequeue_request(&dd->queue);
+	if (areq)
 		dd->flags |= AES_FLAGS_BUSY;
 	spin_unlock_irqrestore(&dd->lock, flags);
 
-	if (!async_req)
+	if (!areq)
 		return ret;
 
 	if (backlog)
 		backlog->complete(backlog, -EINPROGRESS);
 
-	req = ablkcipher_request_cast(async_req);
+	ctx = crypto_tfm_ctx(areq->tfm);
+
+	dd->areq = areq;
+	dd->ctx = ctx;
+
+	err = ctx->start(dd);
+	return (areq != new_areq) ? ret : err;
+}
+
+static int atmel_aes_start(struct atmel_aes_dev *dd)
+{
+	struct ablkcipher_request *req = ablkcipher_request_cast(dd->areq);
+	struct atmel_aes_reqctx *rctx;
+	bool use_dma;
+	int err;
 
 	/* assign new request to device */
-	dd->req = req;
 	dd->total = req->nbytes;
 	dd->in_offset = 0;
 	dd->in_sg = req->src;
@@ -595,11 +618,8 @@ static int atmel_aes_handle_queue(struct atmel_aes_dev *dd,
 	dd->out_sg = req->dst;
 
 	rctx = ablkcipher_request_ctx(req);
-	ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
 	rctx->mode &= AES_FLAGS_MODE_MASK;
 	dd->flags = (dd->flags & ~AES_FLAGS_MODE_MASK) | rctx->mode;
-	dd->ctx = ctx;
-	ctx->dd = dd;
 
 	err = atmel_aes_hw_init(dd);
 	if (!err) {
@@ -616,7 +636,7 @@ static int atmel_aes_handle_queue(struct atmel_aes_dev *dd,
 		tasklet_schedule(&dd->queue_task);
 	}
 
-	return ret;
+	return -EINPROGRESS;
 }
 
 static int atmel_aes_crypt_dma_stop(struct atmel_aes_dev *dd)
@@ -704,7 +724,7 @@ static void atmel_aes_buff_cleanup(struct atmel_aes_dev *dd)
 
 static int atmel_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
 {
-	struct atmel_aes_ctx *ctx = crypto_ablkcipher_ctx(
+	struct atmel_aes_base_ctx *ctx = crypto_ablkcipher_ctx(
 			crypto_ablkcipher_reqtfm(req));
 	struct atmel_aes_reqctx *rctx = ablkcipher_request_ctx(req);
 	struct atmel_aes_dev *dd;
@@ -747,7 +767,7 @@ static int atmel_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
 
 	rctx->mode = mode;
 
-	return atmel_aes_handle_queue(dd, req);
+	return atmel_aes_handle_queue(dd, &req->base);
 }
 
 static bool atmel_aes_filter(struct dma_chan *chan, void *slave)
@@ -822,7 +842,7 @@ static void atmel_aes_dma_cleanup(struct atmel_aes_dev *dd)
 static int atmel_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
 			   unsigned int keylen)
 {
-	struct atmel_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+	struct atmel_aes_base_ctx *ctx = crypto_ablkcipher_ctx(tfm);
 
 	if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
 		   keylen != AES_KEYSIZE_256) {
@@ -946,7 +966,10 @@ static int atmel_aes_ctr_decrypt(struct ablkcipher_request *req)
 
 static int atmel_aes_cra_init(struct crypto_tfm *tfm)
 {
+	struct atmel_aes_ctx *ctx = crypto_tfm_ctx(tfm);
+
 	tfm->crt_ablkcipher.reqsize = sizeof(struct atmel_aes_reqctx);
+	ctx->base.start = atmel_aes_start;
 
 	return 0;
 }