Commit 75ecb231 authored by Herbert Xu

crypto: hash - Add real ahash walk interface

Although the existing hash walk interface has already been used
by a number of ahash crypto drivers, it turns out that none of
them were really asynchronous.  They were all essentially polling
for completion.

That's why nobody has noticed until now that the walk interface
couldn't work with a real asynchronous driver: the memory is mapped
with kmap_atomic, and an atomic mapping cannot be held across the
sleep or completion wait that a truly asynchronous operation implies.

As we now have a use-case for a real ahash implementation on x86,
this patch creates a minimal ahash walk interface.  Basically it
just calls kmap instead of kmap_atomic and does away with the
crypto_yield call.  Real ahash crypto drivers don't need to yield
since by definition they won't be hogging the CPU.
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent 0118a552
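
For context, the loop below sketches how a driver might consume the new
interface.  It is not part of this patch: my_hw_process_chunk() is a
hypothetical stand-in for however a driver feeds a mapped chunk to its
hardware, and error handling is reduced to the bare minimum.  The point
it illustrates is that the async walk maps pages with kmap() rather than
kmap_atomic(), so the driver may sleep while a chunk is mapped.

/* Illustrative only -- not part of this commit. */
static int my_async_ahash_update(struct ahash_request *req)
{
        struct crypto_hash_walk walk;
        int nbytes;
        int err = 0;

        for (nbytes = crypto_ahash_walk_first(req, &walk); nbytes > 0;
             nbytes = crypto_ahash_walk_done(&walk, err)) {
                /*
                 * walk.data was mapped with kmap(), not kmap_atomic(),
                 * so it is safe to sleep while processing this chunk.
                 */
                err = my_hw_process_chunk(req, walk.data, nbytes);
        }

        return nbytes;
}
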
--- a/crypto/ahash.c
+++ b/crypto/ahash.c
@@ -15,6 +15,7 @@
 
 #include <crypto/internal/hash.h>
 #include <crypto/scatterwalk.h>
+#include <linux/bug.h>
 #include <linux/err.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
@@ -46,7 +47,10 @@ static int hash_walk_next(struct crypto_hash_walk *walk)
 	unsigned int nbytes = min(walk->entrylen,
 				  ((unsigned int)(PAGE_SIZE)) - offset);
 
-	walk->data = kmap_atomic(walk->pg);
+	if (walk->flags & CRYPTO_ALG_ASYNC)
+		walk->data = kmap(walk->pg);
+	else
+		walk->data = kmap_atomic(walk->pg);
 	walk->data += offset;
 
 	if (offset & alignmask) {
@@ -93,8 +97,16 @@ int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
 		return nbytes;
 	}
 
-	kunmap_atomic(walk->data);
-	crypto_yield(walk->flags);
+	if (walk->flags & CRYPTO_ALG_ASYNC)
+		kunmap(walk->pg);
+	else {
+		kunmap_atomic(walk->data);
+		/*
+		 * The may sleep test only makes sense for sync users.
+		 * Async users don't need to sleep here anyway.
+		 */
+		crypto_yield(walk->flags);
+	}
 
 	if (err)
 		return err;
@@ -124,12 +136,31 @@ int crypto_hash_walk_first(struct ahash_request *req,
 
 	walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));
 	walk->sg = req->src;
-	walk->flags = req->base.flags;
+	walk->flags = req->base.flags & CRYPTO_TFM_REQ_MASK;
 
 	return hash_walk_new_entry(walk);
 }
 EXPORT_SYMBOL_GPL(crypto_hash_walk_first);
 
+int crypto_ahash_walk_first(struct ahash_request *req,
+			    struct crypto_hash_walk *walk)
+{
+	walk->total = req->nbytes;
+
+	if (!walk->total)
+		return 0;
+
+	walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));
+	walk->sg = req->src;
+	walk->flags = req->base.flags & CRYPTO_TFM_REQ_MASK;
+	walk->flags |= CRYPTO_ALG_ASYNC;
+
+	BUILD_BUG_ON(CRYPTO_TFM_REQ_MASK & CRYPTO_ALG_ASYNC);
+
+	return hash_walk_new_entry(walk);
+}
+EXPORT_SYMBOL_GPL(crypto_ahash_walk_first);
+
 int crypto_hash_walk_first_compat(struct hash_desc *hdesc,
 				  struct crypto_hash_walk *walk,
 				  struct scatterlist *sg, unsigned int len)
@@ -141,7 +172,7 @@ int crypto_hash_walk_first_compat(struct hash_desc *hdesc,
 
 	walk->alignmask = crypto_hash_alignmask(hdesc->tfm);
 	walk->sg = sg;
-	walk->flags = hdesc->flags;
+	walk->flags = hdesc->flags & CRYPTO_TFM_REQ_MASK;
 
 	return hash_walk_new_entry(walk);
 }
--- a/include/crypto/internal/hash.h
+++ b/include/crypto/internal/hash.h
@@ -55,15 +55,28 @@ extern const struct crypto_type crypto_ahash_type;
 int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err);
 int crypto_hash_walk_first(struct ahash_request *req,
 			   struct crypto_hash_walk *walk);
+int crypto_ahash_walk_first(struct ahash_request *req,
+			   struct crypto_hash_walk *walk);
 int crypto_hash_walk_first_compat(struct hash_desc *hdesc,
 				  struct crypto_hash_walk *walk,
 				  struct scatterlist *sg, unsigned int len);
 
+static inline int crypto_ahash_walk_done(struct crypto_hash_walk *walk,
+					 int err)
+{
+	return crypto_hash_walk_done(walk, err);
+}
+
 static inline int crypto_hash_walk_last(struct crypto_hash_walk *walk)
 {
 	return !(walk->entrylen | walk->total);
 }
 
+static inline int crypto_ahash_walk_last(struct crypto_hash_walk *walk)
+{
+	return crypto_hash_walk_last(walk);
+}
+
 int crypto_register_ahash(struct ahash_alg *alg);
 int crypto_unregister_ahash(struct ahash_alg *alg);
 int ahash_register_instance(struct crypto_template *tmpl,