Commit 82fb82be authored by Aurelien Aptel, committed by Steve French

CIFS: refactor crypto shash/sdesc allocation&free

shash and sdesc are always allocated and freed together.
* abstract this in new functions cifs_alloc_hash() and cifs_free_hash().
* make smb2/3 crypto allocation independent from each other.
Signed-off-by: Aurelien Aptel <aaptel@suse.com>
Signed-off-by: Steve French <smfrench@gmail.com>
Reviewed-by: Ronnie Sahlberg <lsahlber@redhat.com>
CC: Stable <stable@vger.kernel.org>
parent f30e4148
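
For orientation, the call pattern introduced by the new helpers looks roughly like this. This is a condensed sketch based on the symlink_hash() and mdfour() hunks below; the wrapper function name and its arguments are illustrative and not part of the patch:

/* Hypothetical one-shot MD5 helper showing the alloc/use/free pattern. */
static int example_md5(const u8 *data, unsigned int len, u8 *hash)
{
        struct crypto_shash *md5 = NULL;   /* tfm, allocated by cifs_alloc_hash() */
        struct sdesc *sdescmd5 = NULL;     /* shash_desc wrapper, allocated alongside */
        int rc;

        rc = cifs_alloc_hash("md5", &md5, &sdescmd5);
        if (rc)
                return rc;

        rc = crypto_shash_init(&sdescmd5->shash);
        if (!rc)
                rc = crypto_shash_update(&sdescmd5->shash, data, len);
        if (!rc)
                rc = crypto_shash_final(&sdescmd5->shash, hash);

        cifs_free_hash(&md5, &sdescmd5);   /* frees both and resets the pointers to NULL */
        return rc;
}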
fs/cifs/cifsencrypt.c

@@ -36,37 +36,6 @@
 #include <crypto/skcipher.h>
 #include <crypto/aead.h>
 
-static int
-cifs_crypto_shash_md5_allocate(struct TCP_Server_Info *server)
-{
-        int rc;
-        unsigned int size;
-
-        if (server->secmech.sdescmd5 != NULL)
-                return 0; /* already allocated */
-
-        server->secmech.md5 = crypto_alloc_shash("md5", 0, 0);
-        if (IS_ERR(server->secmech.md5)) {
-                cifs_dbg(VFS, "could not allocate crypto md5\n");
-                rc = PTR_ERR(server->secmech.md5);
-                server->secmech.md5 = NULL;
-                return rc;
-        }
-
-        size = sizeof(struct shash_desc) +
-                        crypto_shash_descsize(server->secmech.md5);
-        server->secmech.sdescmd5 = kmalloc(size, GFP_KERNEL);
-        if (!server->secmech.sdescmd5) {
-                crypto_free_shash(server->secmech.md5);
-                server->secmech.md5 = NULL;
-                return -ENOMEM;
-        }
-        server->secmech.sdescmd5->shash.tfm = server->secmech.md5;
-        server->secmech.sdescmd5->shash.flags = 0x0;
-
-        return 0;
-}
-
 int __cifs_calc_signature(struct smb_rqst *rqst,
                         struct TCP_Server_Info *server, char *signature,
                         struct shash_desc *shash)
@@ -132,13 +101,10 @@ static int cifs_calc_signature(struct smb_rqst *rqst,
         if (!rqst->rq_iov || !signature || !server)
                 return -EINVAL;
 
-        if (!server->secmech.sdescmd5) {
-                rc = cifs_crypto_shash_md5_allocate(server);
-                if (rc) {
-                        cifs_dbg(VFS, "%s: Can't alloc md5 crypto\n", __func__);
-                        return -1;
-                }
-        }
+        rc = cifs_alloc_hash("md5", &server->secmech.md5,
+                             &server->secmech.sdescmd5);
+        if (rc)
+                return -1;
 
         rc = crypto_shash_init(&server->secmech.sdescmd5->shash);
         if (rc) {
@@ -663,37 +629,6 @@ CalcNTLMv2_response(const struct cifs_ses *ses, char *ntlmv2_hash)
         return rc;
 }
 
-static int crypto_hmacmd5_alloc(struct TCP_Server_Info *server)
-{
-        int rc;
-        unsigned int size;
-
-        /* check if already allocated */
-        if (server->secmech.sdeschmacmd5)
-                return 0;
-
-        server->secmech.hmacmd5 = crypto_alloc_shash("hmac(md5)", 0, 0);
-        if (IS_ERR(server->secmech.hmacmd5)) {
-                cifs_dbg(VFS, "could not allocate crypto hmacmd5\n");
-                rc = PTR_ERR(server->secmech.hmacmd5);
-                server->secmech.hmacmd5 = NULL;
-                return rc;
-        }
-
-        size = sizeof(struct shash_desc) +
-                        crypto_shash_descsize(server->secmech.hmacmd5);
-        server->secmech.sdeschmacmd5 = kmalloc(size, GFP_KERNEL);
-        if (!server->secmech.sdeschmacmd5) {
-                crypto_free_shash(server->secmech.hmacmd5);
-                server->secmech.hmacmd5 = NULL;
-                return -ENOMEM;
-        }
-        server->secmech.sdeschmacmd5->shash.tfm = server->secmech.hmacmd5;
-        server->secmech.sdeschmacmd5->shash.flags = 0x0;
-
-        return 0;
-}
-
 int
 setup_ntlmv2_rsp(struct cifs_ses *ses, const struct nls_table *nls_cp)
 {
@@ -757,9 +692,10 @@ setup_ntlmv2_rsp(struct cifs_ses *ses, const struct nls_table *nls_cp)
         mutex_lock(&ses->server->srv_mutex);
 
-        rc = crypto_hmacmd5_alloc(ses->server);
+        rc = cifs_alloc_hash("hmac(md5)",
+                             &ses->server->secmech.hmacmd5,
+                             &ses->server->secmech.sdeschmacmd5);
         if (rc) {
-                cifs_dbg(VFS, "could not crypto alloc hmacmd5 rc %d\n", rc);
                 goto unlock;
         }
fs/cifs/cifsproto.h

@@ -542,4 +542,9 @@ enum securityEnum cifs_select_sectype(struct TCP_Server_Info *,
 struct cifs_aio_ctx *cifs_aio_ctx_alloc(void);
 void cifs_aio_ctx_release(struct kref *refcount);
 int setup_aio_ctx_iter(struct cifs_aio_ctx *ctx, struct iov_iter *iter, int rw);
+
+int cifs_alloc_hash(const char *name, struct crypto_shash **shash,
+                    struct sdesc **sdesc);
+void cifs_free_hash(struct crypto_shash **shash, struct sdesc **sdesc);
+
 #endif /* _CIFSPROTO_H */
fs/cifs/link.c

@@ -50,25 +50,12 @@ static int
 symlink_hash(unsigned int link_len, const char *link_str, u8 *md5_hash)
 {
         int rc;
-        unsigned int size;
-        struct crypto_shash *md5;
-        struct sdesc *sdescmd5;
+        struct crypto_shash *md5 = NULL;
+        struct sdesc *sdescmd5 = NULL;
 
-        md5 = crypto_alloc_shash("md5", 0, 0);
-        if (IS_ERR(md5)) {
-                rc = PTR_ERR(md5);
-                cifs_dbg(VFS, "%s: Crypto md5 allocation error %d\n",
-                         __func__, rc);
-                return rc;
-        }
-        size = sizeof(struct shash_desc) + crypto_shash_descsize(md5);
-        sdescmd5 = kmalloc(size, GFP_KERNEL);
-        if (!sdescmd5) {
-                rc = -ENOMEM;
+        rc = cifs_alloc_hash("md5", &md5, &sdescmd5);
+        if (rc)
                 goto symlink_hash_err;
-        }
-        sdescmd5->shash.tfm = md5;
-        sdescmd5->shash.flags = 0x0;
 
         rc = crypto_shash_init(&sdescmd5->shash);
         if (rc) {

@@ -85,9 +72,7 @@ symlink_hash(unsigned int link_len, const char *link_str, u8 *md5_hash)
                 cifs_dbg(VFS, "%s: Could not generate md5 hash\n", __func__);
 
 symlink_hash_err:
-        crypto_free_shash(md5);
-        kfree(sdescmd5);
-
+        cifs_free_hash(&md5, &sdescmd5);
         return rc;
 }
fs/cifs/misc.c

@@ -848,3 +848,57 @@ setup_aio_ctx_iter(struct cifs_aio_ctx *ctx, struct iov_iter *iter, int rw)
         iov_iter_bvec(&ctx->iter, ITER_BVEC | rw, ctx->bv, npages, ctx->len);
         return 0;
 }
+
+/**
+ * cifs_alloc_hash - allocate hash and hash context together
+ *
+ * The caller has to make sure @sdesc is initialized to either NULL or
+ * a valid context. Both can be freed via cifs_free_hash().
+ */
+int
+cifs_alloc_hash(const char *name,
+                struct crypto_shash **shash, struct sdesc **sdesc)
+{
+        int rc = 0;
+        size_t size;
+
+        if (*sdesc != NULL)
+                return 0;
+
+        *shash = crypto_alloc_shash(name, 0, 0);
+        if (IS_ERR(*shash)) {
+                cifs_dbg(VFS, "could not allocate crypto %s\n", name);
+                rc = PTR_ERR(*shash);
+                *shash = NULL;
+                *sdesc = NULL;
+                return rc;
+        }
+
+        size = sizeof(struct shash_desc) + crypto_shash_descsize(*shash);
+        *sdesc = kmalloc(size, GFP_KERNEL);
+        if (*sdesc == NULL) {
+                cifs_dbg(VFS, "no memory left to allocate crypto %s\n", name);
+                crypto_free_shash(*shash);
+                *shash = NULL;
+                return -ENOMEM;
+        }
+
+        (*sdesc)->shash.tfm = *shash;
+        (*sdesc)->shash.flags = 0x0;
+        return 0;
+}
+
+/**
+ * cifs_free_hash - free hash and hash context together
+ *
+ * Freeing a NULL hash or context is safe.
+ */
+void
+cifs_free_hash(struct crypto_shash **shash, struct sdesc **sdesc)
+{
+        kfree(*sdesc);
+        *sdesc = NULL;
+        if (*shash)
+                crypto_free_shash(*shash);
+        *shash = NULL;
+}
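
One property of the added helper worth noting: because cifs_alloc_hash() returns early when *sdesc is already non-NULL, the per-server callers in this patch (cifs_calc_signature, setup_ntlmv2_rsp, and the smb2/smb3 shash setup) can call it on every request and only pay the allocation cost once. A minimal reuse sketch follows; the caller name and buffer arguments are hypothetical and not part of the patch:

/* Hypothetical per-server caller illustrating reuse of the cached shash/sdesc. */
static int example_sign_md5(struct TCP_Server_Info *server,
                            const u8 *buf, unsigned int len, u8 *signature)
{
        int rc;

        /* No-op after the first successful call for this server. */
        rc = cifs_alloc_hash("md5", &server->secmech.md5,
                             &server->secmech.sdescmd5);
        if (rc)
                return rc;

        rc = crypto_shash_init(&server->secmech.sdescmd5->shash);
        if (rc)
                return rc;
        rc = crypto_shash_update(&server->secmech.sdescmd5->shash, buf, len);
        if (rc)
                return rc;
        return crypto_shash_final(&server->secmech.sdescmd5->shash, signature);
}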
fs/cifs/smb2transport.c

@@ -43,76 +43,31 @@
 static int
 smb2_crypto_shash_allocate(struct TCP_Server_Info *server)
 {
-        int rc;
-        unsigned int size;
-
-        if (server->secmech.sdeschmacsha256 != NULL)
-                return 0; /* already allocated */
-
-        server->secmech.hmacsha256 = crypto_alloc_shash("hmac(sha256)", 0, 0);
-        if (IS_ERR(server->secmech.hmacsha256)) {
-                cifs_dbg(VFS, "could not allocate crypto hmacsha256\n");
-                rc = PTR_ERR(server->secmech.hmacsha256);
-                server->secmech.hmacsha256 = NULL;
-                return rc;
-        }
-
-        size = sizeof(struct shash_desc) +
-                        crypto_shash_descsize(server->secmech.hmacsha256);
-        server->secmech.sdeschmacsha256 = kmalloc(size, GFP_KERNEL);
-        if (!server->secmech.sdeschmacsha256) {
-                crypto_free_shash(server->secmech.hmacsha256);
-                server->secmech.hmacsha256 = NULL;
-                return -ENOMEM;
-        }
-        server->secmech.sdeschmacsha256->shash.tfm = server->secmech.hmacsha256;
-        server->secmech.sdeschmacsha256->shash.flags = 0x0;
-
-        return 0;
+        return cifs_alloc_hash("hmac(sha256)",
+                               &server->secmech.hmacsha256,
+                               &server->secmech.sdeschmacsha256);
 }
 
 static int
 smb3_crypto_shash_allocate(struct TCP_Server_Info *server)
 {
-        unsigned int size;
+        struct cifs_secmech *p = &server->secmech;
         int rc;
 
-        if (server->secmech.sdesccmacaes != NULL)
-                return 0; /* already allocated */
-
-        rc = smb2_crypto_shash_allocate(server);
+        rc = cifs_alloc_hash("hmac(sha256)",
+                             &p->hmacsha256,
+                             &p->sdeschmacsha256);
         if (rc)
-                return rc;
-
-        server->secmech.cmacaes = crypto_alloc_shash("cmac(aes)", 0, 0);
-        if (IS_ERR(server->secmech.cmacaes)) {
-                cifs_dbg(VFS, "could not allocate crypto cmac-aes");
-                kfree(server->secmech.sdeschmacsha256);
-                server->secmech.sdeschmacsha256 = NULL;
-                crypto_free_shash(server->secmech.hmacsha256);
-                server->secmech.hmacsha256 = NULL;
-                rc = PTR_ERR(server->secmech.cmacaes);
-                server->secmech.cmacaes = NULL;
-                return rc;
-        }
+                goto err;
 
-        size = sizeof(struct shash_desc) +
-                        crypto_shash_descsize(server->secmech.cmacaes);
-        server->secmech.sdesccmacaes = kmalloc(size, GFP_KERNEL);
-        if (!server->secmech.sdesccmacaes) {
-                cifs_dbg(VFS, "%s: Can't alloc cmacaes\n", __func__);
-                kfree(server->secmech.sdeschmacsha256);
-                server->secmech.sdeschmacsha256 = NULL;
-                crypto_free_shash(server->secmech.hmacsha256);
-                crypto_free_shash(server->secmech.cmacaes);
-                server->secmech.hmacsha256 = NULL;
-                server->secmech.cmacaes = NULL;
-                return -ENOMEM;
-        }
-        server->secmech.sdesccmacaes->shash.tfm = server->secmech.cmacaes;
-        server->secmech.sdesccmacaes->shash.flags = 0x0;
+        rc = cifs_alloc_hash("cmac(aes)", &p->cmacaes, &p->sdesccmacaes);
+        if (rc)
+                goto err;
 
         return 0;
+err:
+        cifs_free_hash(&p->hmacsha256, &p->sdeschmacsha256);
+        return rc;
 }
 
 static struct cifs_ses *
fs/cifs/smbencrypt.c

@@ -121,25 +121,12 @@ int
 mdfour(unsigned char *md4_hash, unsigned char *link_str, int link_len)
 {
         int rc;
-        unsigned int size;
-        struct crypto_shash *md4;
-        struct sdesc *sdescmd4;
+        struct crypto_shash *md4 = NULL;
+        struct sdesc *sdescmd4 = NULL;
 
-        md4 = crypto_alloc_shash("md4", 0, 0);
-        if (IS_ERR(md4)) {
-                rc = PTR_ERR(md4);
-                cifs_dbg(VFS, "%s: Crypto md4 allocation error %d\n",
-                         __func__, rc);
-                return rc;
-        }
-        size = sizeof(struct shash_desc) + crypto_shash_descsize(md4);
-        sdescmd4 = kmalloc(size, GFP_KERNEL);
-        if (!sdescmd4) {
-                rc = -ENOMEM;
+        rc = cifs_alloc_hash("md4", &md4, &sdescmd4);
+        if (rc)
                 goto mdfour_err;
-        }
-        sdescmd4->shash.tfm = md4;
-        sdescmd4->shash.flags = 0x0;
 
         rc = crypto_shash_init(&sdescmd4->shash);
         if (rc) {

@@ -156,9 +143,7 @@ mdfour(unsigned char *md4_hash, unsigned char *link_str, int link_len)
                 cifs_dbg(VFS, "%s: Could not generate md4 hash\n", __func__);
 
 mdfour_err:
-        crypto_free_shash(md4);
-        kfree(sdescmd4);
-
+        cifs_free_hash(&md4, &sdescmd4);
         return rc;
 }