Commit fb415f22 authored by Linus Torvalds

Merge tag 'nfsd-4.9-1' of git://linux-nfs.org/~bfields/linux

Pull nfsd bugfixes from Bruce Fields:
 "Fixes for some recent regressions including fallout from the vmalloc'd
  stack change (after which we can no longer encrypt stuff on the
  stack)"

* tag 'nfsd-4.9-1' of git://linux-nfs.org/~bfields/linux:
  nfsd: Fix general protection fault in release_lock_stateid()
  svcrdma: backchannel cannot share a page for send and rcv buffers
  sunrpc: fix some missing rq_rbuffer assignments
  sunrpc: don't pass on-stack memory to sg_set_buf
  nfsd: move blocked lock handling under a dedicated spinlock
parents 46d7cbb2 f46c445b
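
The vmalloc'd-stack fallout mentioned above comes from the scatterlist API: sg_set_buf() resolves its buffer with virt_to_page(), which is only valid for linearly mapped memory, and with CONFIG_VMAP_STACK the kernel stack lives in vmalloc space, so on-stack variables can no longer be handed to the crypto layer that way. Below is a minimal sketch of the pattern the sunrpc patches in this pull apply (heap-allocate the small buffer, use it, free it); gss_mic_over_seqno() is a hypothetical helper written for illustration only, not a function from these patches.

#include <linux/slab.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/gss_api.h>

/* Hypothetical helper, for illustration only: verify a MIC computed over a
 * 32-bit sequence number. The sequence number is kmalloc'd rather than kept
 * on the stack, because the GSS/crypto code may feed this memory to
 * sg_set_buf(), and virt_to_page() is invalid for a vmalloc'd stack address.
 */
static int gss_mic_over_seqno(struct gss_ctx *ctx_id, u32 seqno,
			      struct xdr_netobj *mic)
{
	struct xdr_buf verf_buf;
	struct kvec iov;
	__be32 *seq;
	u32 maj_stat;

	seq = kmalloc(sizeof(*seq), GFP_KERNEL);	/* heap, not stack */
	if (!seq)
		return -ENOMEM;
	*seq = htonl(seqno);

	iov.iov_base = seq;
	iov.iov_len = sizeof(*seq);
	xdr_buf_from_iov(&iov, &verf_buf);

	/* the crypto layer may build a scatterlist over this memory */
	maj_stat = gss_verify_mic(ctx_id, &verf_buf, mic);

	kfree(seq);
	return maj_stat == GSS_S_COMPLETE ? 0 : -EACCES;
}

The gss_krb5 checksum helpers in the diff below apply the same idea to their larger checksum and CTS scratch buffers.
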
@@ -84,6 +84,8 @@ struct nfsd_net {
 	struct list_head client_lru;
 	struct list_head close_lru;
 	struct list_head del_recall_lru;
+
+	/* protected by blocked_locks_lock */
 	struct list_head blocked_locks_lru;
 
 	struct delayed_work laundromat_work;
@@ -91,6 +93,9 @@ struct nfsd_net {
 	/* client_lock protects the client lru list and session hash table */
 	spinlock_t client_lock;
 
+	/* protects blocked_locks_lru */
+	spinlock_t blocked_locks_lock;
+
 	struct file *rec_file;
 	bool in_grace;
 	const struct nfsd4_client_tracking_ops *client_tracking_ops;
...
@@ -217,7 +217,7 @@ find_blocked_lock(struct nfs4_lockowner *lo, struct knfsd_fh *fh,
 {
 	struct nfsd4_blocked_lock *cur, *found = NULL;
 
-	spin_lock(&nn->client_lock);
+	spin_lock(&nn->blocked_locks_lock);
 	list_for_each_entry(cur, &lo->lo_blocked, nbl_list) {
 		if (fh_match(fh, &cur->nbl_fh)) {
 			list_del_init(&cur->nbl_list);
@@ -226,7 +226,7 @@ find_blocked_lock(struct nfs4_lockowner *lo, struct knfsd_fh *fh,
 			break;
 		}
 	}
-	spin_unlock(&nn->client_lock);
+	spin_unlock(&nn->blocked_locks_lock);
 	if (found)
 		posix_unblock_lock(&found->nbl_lock);
 	return found;
@@ -1227,9 +1227,7 @@ static void put_ol_stateid_locked(struct nfs4_ol_stateid *stp,
 
 static bool unhash_lock_stateid(struct nfs4_ol_stateid *stp)
 {
-	struct nfs4_openowner *oo = openowner(stp->st_openstp->st_stateowner);
-
-	lockdep_assert_held(&oo->oo_owner.so_client->cl_lock);
+	lockdep_assert_held(&stp->st_stid.sc_client->cl_lock);
 
 	list_del_init(&stp->st_locks);
 	nfs4_unhash_stid(&stp->st_stid);
@@ -1238,12 +1236,12 @@ static bool unhash_lock_stateid(struct nfs4_ol_stateid *stp)
 
 static void release_lock_stateid(struct nfs4_ol_stateid *stp)
 {
-	struct nfs4_openowner *oo = openowner(stp->st_openstp->st_stateowner);
+	struct nfs4_client *clp = stp->st_stid.sc_client;
 	bool unhashed;
 
-	spin_lock(&oo->oo_owner.so_client->cl_lock);
+	spin_lock(&clp->cl_lock);
 	unhashed = unhash_lock_stateid(stp);
-	spin_unlock(&oo->oo_owner.so_client->cl_lock);
+	spin_unlock(&clp->cl_lock);
 	if (unhashed)
 		nfs4_put_stid(&stp->st_stid);
 }
@@ -4665,7 +4663,7 @@ nfs4_laundromat(struct nfsd_net *nn)
 	 * indefinitely once the lock does become free.
 	 */
 	BUG_ON(!list_empty(&reaplist));
-	spin_lock(&nn->client_lock);
+	spin_lock(&nn->blocked_locks_lock);
 	while (!list_empty(&nn->blocked_locks_lru)) {
 		nbl = list_first_entry(&nn->blocked_locks_lru,
 					struct nfsd4_blocked_lock, nbl_lru);
@@ -4678,7 +4676,7 @@ nfs4_laundromat(struct nfsd_net *nn)
 		list_move(&nbl->nbl_lru, &reaplist);
 		list_del_init(&nbl->nbl_list);
 	}
-	spin_unlock(&nn->client_lock);
+	spin_unlock(&nn->blocked_locks_lock);
 
 	while (!list_empty(&reaplist)) {
 		nbl = list_first_entry(&nn->blocked_locks_lru,
@@ -5439,13 +5437,13 @@ nfsd4_lm_notify(struct file_lock *fl)
 	bool queue = false;
 
 	/* An empty list means that something else is going to be using it */
-	spin_lock(&nn->client_lock);
+	spin_lock(&nn->blocked_locks_lock);
 	if (!list_empty(&nbl->nbl_list)) {
 		list_del_init(&nbl->nbl_list);
 		list_del_init(&nbl->nbl_lru);
 		queue = true;
 	}
-	spin_unlock(&nn->client_lock);
+	spin_unlock(&nn->blocked_locks_lock);
 
 	if (queue)
 		nfsd4_run_cb(&nbl->nbl_cb);
@@ -5868,10 +5866,10 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
 	if (fl_flags & FL_SLEEP) {
 		nbl->nbl_time = jiffies;
-		spin_lock(&nn->client_lock);
+		spin_lock(&nn->blocked_locks_lock);
 		list_add_tail(&nbl->nbl_list, &lock_sop->lo_blocked);
 		list_add_tail(&nbl->nbl_lru, &nn->blocked_locks_lru);
-		spin_unlock(&nn->client_lock);
+		spin_unlock(&nn->blocked_locks_lock);
 	}
 
 	err = vfs_lock_file(filp, F_SETLK, file_lock, conflock);
@@ -5900,10 +5898,10 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
 	if (nbl) {
 		/* dequeue it if we queued it before */
 		if (fl_flags & FL_SLEEP) {
-			spin_lock(&nn->client_lock);
+			spin_lock(&nn->blocked_locks_lock);
 			list_del_init(&nbl->nbl_list);
 			list_del_init(&nbl->nbl_lru);
-			spin_unlock(&nn->client_lock);
+			spin_unlock(&nn->blocked_locks_lock);
 		}
 		free_blocked_lock(nbl);
 	}
@@ -6943,9 +6941,11 @@ static int nfs4_state_create_net(struct net *net)
 	INIT_LIST_HEAD(&nn->client_lru);
 	INIT_LIST_HEAD(&nn->close_lru);
 	INIT_LIST_HEAD(&nn->del_recall_lru);
-	INIT_LIST_HEAD(&nn->blocked_locks_lru);
 	spin_lock_init(&nn->client_lock);
+	spin_lock_init(&nn->blocked_locks_lock);
+	INIT_LIST_HEAD(&nn->blocked_locks_lru);
 
 	INIT_DELAYED_WORK(&nn->laundromat_work, laundromat_main);
 	get_net(net);
@@ -7063,14 +7063,14 @@ nfs4_state_shutdown_net(struct net *net)
 	}
 
 	BUG_ON(!list_empty(&reaplist));
-	spin_lock(&nn->client_lock);
+	spin_lock(&nn->blocked_locks_lock);
 	while (!list_empty(&nn->blocked_locks_lru)) {
 		nbl = list_first_entry(&nn->blocked_locks_lru,
 					struct nfsd4_blocked_lock, nbl_lru);
 		list_move(&nbl->nbl_lru, &reaplist);
 		list_del_init(&nbl->nbl_list);
 	}
-	spin_unlock(&nn->client_lock);
+	spin_unlock(&nn->blocked_locks_lock);
 
 	while (!list_empty(&reaplist)) {
 		nbl = list_first_entry(&nn->blocked_locks_lru,
...
@@ -1616,7 +1616,7 @@ gss_validate(struct rpc_task *task, __be32 *p)
 {
 	struct rpc_cred *cred = task->tk_rqstp->rq_cred;
 	struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
-	__be32 seq;
+	__be32 *seq = NULL;
 	struct kvec iov;
 	struct xdr_buf verf_buf;
 	struct xdr_netobj mic;
@@ -1631,9 +1631,12 @@ gss_validate(struct rpc_task *task, __be32 *p)
 		goto out_bad;
 	if (flav != RPC_AUTH_GSS)
 		goto out_bad;
-	seq = htonl(task->tk_rqstp->rq_seqno);
-	iov.iov_base = &seq;
-	iov.iov_len = sizeof(seq);
+	seq = kmalloc(4, GFP_NOFS);
+	if (!seq)
+		goto out_bad;
+	*seq = htonl(task->tk_rqstp->rq_seqno);
+	iov.iov_base = seq;
+	iov.iov_len = 4;
 	xdr_buf_from_iov(&iov, &verf_buf);
 	mic.data = (u8 *)p;
 	mic.len = len;
@@ -1653,11 +1656,13 @@ gss_validate(struct rpc_task *task, __be32 *p)
 	gss_put_ctx(ctx);
 	dprintk("RPC: %5u %s: gss_verify_mic succeeded.\n",
 			task->tk_pid, __func__);
+	kfree(seq);
 	return p + XDR_QUADLEN(len);
 out_bad:
 	gss_put_ctx(ctx);
 	dprintk("RPC: %5u %s failed ret %ld.\n", task->tk_pid, __func__,
 		PTR_ERR(ret));
+	kfree(seq);
 	return ret;
 }
...
@@ -166,8 +166,8 @@ make_checksum_hmac_md5(struct krb5_ctx *kctx, char *header, int hdrlen,
 		       unsigned int usage, struct xdr_netobj *cksumout)
 {
 	struct scatterlist sg[1];
-	int err;
-	u8 checksumdata[GSS_KRB5_MAX_CKSUM_LEN];
+	int err = -1;
+	u8 *checksumdata;
 	u8 rc4salt[4];
 	struct crypto_ahash *md5;
 	struct crypto_ahash *hmac_md5;
@@ -187,23 +187,22 @@ make_checksum_hmac_md5(struct krb5_ctx *kctx, char *header, int hdrlen,
 		return GSS_S_FAILURE;
 	}
 
+	checksumdata = kmalloc(GSS_KRB5_MAX_CKSUM_LEN, GFP_NOFS);
+	if (!checksumdata)
+		return GSS_S_FAILURE;
+
 	md5 = crypto_alloc_ahash("md5", 0, CRYPTO_ALG_ASYNC);
 	if (IS_ERR(md5))
-		return GSS_S_FAILURE;
+		goto out_free_cksum;
 
 	hmac_md5 = crypto_alloc_ahash(kctx->gk5e->cksum_name, 0,
 				      CRYPTO_ALG_ASYNC);
-	if (IS_ERR(hmac_md5)) {
-		crypto_free_ahash(md5);
-		return GSS_S_FAILURE;
-	}
+	if (IS_ERR(hmac_md5))
+		goto out_free_md5;
 
 	req = ahash_request_alloc(md5, GFP_KERNEL);
-	if (!req) {
-		crypto_free_ahash(hmac_md5);
-		crypto_free_ahash(md5);
-		return GSS_S_FAILURE;
-	}
+	if (!req)
+		goto out_free_hmac_md5;
 
 	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
@@ -232,11 +231,8 @@ make_checksum_hmac_md5(struct krb5_ctx *kctx, char *header, int hdrlen,
 	ahash_request_free(req);
 	req = ahash_request_alloc(hmac_md5, GFP_KERNEL);
-	if (!req) {
-		crypto_free_ahash(hmac_md5);
-		crypto_free_ahash(md5);
-		return GSS_S_FAILURE;
-	}
+	if (!req)
+		goto out_free_hmac_md5;
 
 	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
@@ -258,8 +254,12 @@ make_checksum_hmac_md5(struct krb5_ctx *kctx, char *header, int hdrlen,
 	cksumout->len = kctx->gk5e->cksumlength;
 out:
 	ahash_request_free(req);
-	crypto_free_ahash(md5);
+out_free_hmac_md5:
 	crypto_free_ahash(hmac_md5);
+out_free_md5:
+	crypto_free_ahash(md5);
+out_free_cksum:
+	kfree(checksumdata);
 	return err ? GSS_S_FAILURE : 0;
 }
@@ -276,8 +276,8 @@ make_checksum(struct krb5_ctx *kctx, char *header, int hdrlen,
 	struct crypto_ahash *tfm;
 	struct ahash_request *req;
 	struct scatterlist sg[1];
-	int err;
-	u8 checksumdata[GSS_KRB5_MAX_CKSUM_LEN];
+	int err = -1;
+	u8 *checksumdata;
 	unsigned int checksumlen;
 
 	if (kctx->gk5e->ctype == CKSUMTYPE_HMAC_MD5_ARCFOUR)
@@ -291,15 +291,17 @@ make_checksum(struct krb5_ctx *kctx, char *header, int hdrlen,
 		return GSS_S_FAILURE;
 	}
 
+	checksumdata = kmalloc(GSS_KRB5_MAX_CKSUM_LEN, GFP_NOFS);
+	if (checksumdata == NULL)
+		return GSS_S_FAILURE;
+
 	tfm = crypto_alloc_ahash(kctx->gk5e->cksum_name, 0, CRYPTO_ALG_ASYNC);
 	if (IS_ERR(tfm))
-		return GSS_S_FAILURE;
+		goto out_free_cksum;
 
 	req = ahash_request_alloc(tfm, GFP_KERNEL);
-	if (!req) {
-		crypto_free_ahash(tfm);
-		return GSS_S_FAILURE;
-	}
+	if (!req)
+		goto out_free_ahash;
 
 	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
@@ -349,7 +351,10 @@ make_checksum(struct krb5_ctx *kctx, char *header, int hdrlen,
 	cksumout->len = kctx->gk5e->cksumlength;
 out:
 	ahash_request_free(req);
+out_free_ahash:
 	crypto_free_ahash(tfm);
+out_free_cksum:
+	kfree(checksumdata);
 	return err ? GSS_S_FAILURE : 0;
 }
@@ -368,8 +373,8 @@ make_checksum_v2(struct krb5_ctx *kctx, char *header, int hdrlen,
 	struct crypto_ahash *tfm;
 	struct ahash_request *req;
 	struct scatterlist sg[1];
-	int err;
-	u8 checksumdata[GSS_KRB5_MAX_CKSUM_LEN];
+	int err = -1;
+	u8 *checksumdata;
 	unsigned int checksumlen;
 
 	if (kctx->gk5e->keyed_cksum == 0) {
@@ -383,16 +388,18 @@ make_checksum_v2(struct krb5_ctx *kctx, char *header, int hdrlen,
 		return GSS_S_FAILURE;
 	}
 
+	checksumdata = kmalloc(GSS_KRB5_MAX_CKSUM_LEN, GFP_NOFS);
+	if (!checksumdata)
+		return GSS_S_FAILURE;
+
 	tfm = crypto_alloc_ahash(kctx->gk5e->cksum_name, 0, CRYPTO_ALG_ASYNC);
 	if (IS_ERR(tfm))
-		return GSS_S_FAILURE;
+		goto out_free_cksum;
 
 	checksumlen = crypto_ahash_digestsize(tfm);
 
 	req = ahash_request_alloc(tfm, GFP_KERNEL);
-	if (!req) {
-		crypto_free_ahash(tfm);
-		return GSS_S_FAILURE;
-	}
+	if (!req)
+		goto out_free_ahash;
 
 	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
@@ -433,7 +440,10 @@ make_checksum_v2(struct krb5_ctx *kctx, char *header, int hdrlen,
 	}
 out:
 	ahash_request_free(req);
+out_free_ahash:
 	crypto_free_ahash(tfm);
+out_free_cksum:
+	kfree(checksumdata);
 	return err ? GSS_S_FAILURE : 0;
 }
@@ -666,14 +676,17 @@ gss_krb5_cts_crypt(struct crypto_skcipher *cipher, struct xdr_buf *buf,
 	u32 ret;
 	struct scatterlist sg[1];
 	SKCIPHER_REQUEST_ON_STACK(req, cipher);
-	u8 data[GSS_KRB5_MAX_BLOCKSIZE * 2];
+	u8 *data;
 	struct page **save_pages;
 	u32 len = buf->len - offset;
 
-	if (len > ARRAY_SIZE(data)) {
+	if (len > GSS_KRB5_MAX_BLOCKSIZE * 2) {
 		WARN_ON(0);
 		return -ENOMEM;
 	}
+	data = kmalloc(GSS_KRB5_MAX_BLOCKSIZE * 2, GFP_NOFS);
+	if (!data)
+		return -ENOMEM;
 
 	/*
 	 * For encryption, we want to read from the cleartext
@@ -708,6 +721,7 @@ gss_krb5_cts_crypt(struct crypto_skcipher *cipher, struct xdr_buf *buf,
 	ret = write_bytes_to_xdr_buf(buf, offset, data, len);
 out:
+	kfree(data);
 	return ret;
 }
...
@@ -718,30 +718,37 @@ gss_write_null_verf(struct svc_rqst *rqstp)
 static int
 gss_write_verf(struct svc_rqst *rqstp, struct gss_ctx *ctx_id, u32 seq)
 {
-	__be32 xdr_seq;
+	__be32 *xdr_seq;
 	u32 maj_stat;
 	struct xdr_buf verf_data;
 	struct xdr_netobj mic;
 	__be32 *p;
 	struct kvec iov;
+	int err = -1;
 
 	svc_putnl(rqstp->rq_res.head, RPC_AUTH_GSS);
-	xdr_seq = htonl(seq);
+	xdr_seq = kmalloc(4, GFP_KERNEL);
+	if (!xdr_seq)
+		return -1;
+	*xdr_seq = htonl(seq);
 
-	iov.iov_base = &xdr_seq;
-	iov.iov_len = sizeof(xdr_seq);
+	iov.iov_base = xdr_seq;
+	iov.iov_len = 4;
 	xdr_buf_from_iov(&iov, &verf_data);
 	p = rqstp->rq_res.head->iov_base + rqstp->rq_res.head->iov_len;
 	mic.data = (u8 *)(p + 1);
 	maj_stat = gss_get_mic(ctx_id, &verf_data, &mic);
 	if (maj_stat != GSS_S_COMPLETE)
-		return -1;
+		goto out;
 	*p++ = htonl(mic.len);
 	memset((u8 *)p + mic.len, 0, round_up_to_quad(mic.len) - mic.len);
 	p += XDR_QUADLEN(mic.len);
 	if (!xdr_ressize_check(rqstp, p))
-		return -1;
-	return 0;
+		goto out;
+	err = 0;
+out:
+	kfree(xdr_seq);
+	return err;
 }
 
 struct gss_domain {
...
@@ -177,18 +177,26 @@ xprt_rdma_bc_allocate(struct rpc_task *task)
 		return -EINVAL;
 	}
 
+	/* svc_rdma_sendto releases this page */
 	page = alloc_page(RPCRDMA_DEF_GFP);
 	if (!page)
 		return -ENOMEM;
 
 	rqst->rq_buffer = page_address(page);
+
+	rqst->rq_rbuffer = kmalloc(rqst->rq_rcvsize, RPCRDMA_DEF_GFP);
+	if (!rqst->rq_rbuffer) {
+		put_page(page);
+		return -ENOMEM;
+	}
 	return 0;
 }
 
 static void
 xprt_rdma_bc_free(struct rpc_task *task)
 {
-	/* No-op: ctxt and page have already been freed. */
+	struct rpc_rqst *rqst = task->tk_rqstp;
+
+	kfree(rqst->rq_rbuffer);
 }
 
 static int
...
@@ -2563,6 +2563,7 @@ static int bc_malloc(struct rpc_task *task)
 	buf->len = PAGE_SIZE;
 
 	rqst->rq_buffer = buf->data;
+	rqst->rq_rbuffer = (char *)rqst->rq_buffer + rqst->rq_callsize;
 	return 0;
 }
...