Commit 68c45a68 authored by Trond Myklebust's avatar Trond Myklebust Committed by Linus Torvalds

[PATCH] RPC client receive deadlock removal on HIGHMEM systems

Remove another class of rpciod deadlocks on HIGHMEM systems: kick the habit
of keeping pages kmap()ed for the entire duration of NFS
read/readdir/readlink operations.

Use struct page directly in the RPC client data receive buffer. The TCP and
UDP sk->data_ready() bottom halves copy (and checksum, when needed) data into
pages rather than iovecs. kmap_atomic() of single pages is used for the
copy.
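
For illustration, this is roughly what the receive buffer looks like once an
XDR encode routine has inlined the page array (a sketch of the effect of the
new xdr_inline_pages() helper in the diff below; 'old_head_len' is just a
placeholder for the header iovec's original length, not a real variable):

	/* Sketch: state of the receive xdr_buf after
	 * xdr_inline_pages(buf, offset, pages, base, len).
	 * The reply header stays in head[0], the payload is received
	 * straight into the page cache pages, and the leftover header
	 * space becomes the tail iovec. */
	buf->head[0].iov_len  = offset;		/* RPC/NFS reply header */
	buf->pages            = pages;		/* payload pages */
	buf->page_base        = base;
	buf->page_len         = len;
	buf->tail[0].iov_base = (char *)buf->head[0].iov_base + offset;
	buf->tail[0].iov_len  = old_head_len - offset;
	buf->len             += len;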

include/linux/sunrpc/xdr.h
   Declare the structure for copying from an sk_buff here rather than in xprt.c.
   Forward declarations of the new functions.

include/linux/sunrpc/xprt.h
   RPC client receive buffer changed to use new format 'struct xdr_buf'.

net/sunrpc/clnt.c
   Initialize new format receive buffer.

net/sunrpc/sunrpc_syms.c
   Export xdr_inline_pages(), xdr_shift_buf()

net/sunrpc/xdr.c
   xdr_inline_pages() inlines pages into the receive buffer.
   xdr_partial_copy_from_skb() replaces csum_partial_copy_to_page_cache()
	and the copy code in tcp_read_request(). Provides a sendfile()-style
	method for copying data from an skb into a struct xdr_buf.
   xdr_shift_buf() replaces xdr_shift_iovec() for when we overestimate
	the size of the RPC/NFS header.
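
As a usage sketch (names taken from the patch: skb_reader_t and
skb_read_actor_t from the header, skb_read_bits as the non-checksumming
actor in xprt.c; 'req' and 'skb' assumed in scope), a socket bottom half
feeds an skb into the receive buffer like this:

	/* Sketch only: copy a UDP reply payload into the request's
	 * receive buffer without checksumming. */
	skb_reader_t desc;

	desc.skb    = skb;
	desc.offset = sizeof(struct udphdr);	/* skip the transport header */
	desc.count  = skb->len - desc.offset;
	xdr_partial_copy_from_skb(&req->rq_rcv_buf, 0, &desc, skb_read_bits);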

net/sunrpc/xprt.c
   Adapt UDP and TCP receive routines to use new format xdr_buf.

include/linux/nfs_xdr.h
   struct nfs_readargs, nfs_readdirargs, nfs_readlinkargs,
	nfs3_readdirargs, nfs3_readlinkargs all transmit page information.
   struct nfs_readdirres, nfs_readlinkres, nfs3_readlinkres are obsoleted.
   struct nfs_rpc_ops->readlink(), readdir(), read() now take pages.

fs/nfs/dir.c
   Adapt to new format ->readdir().
   Avoid kmap() around the RPC call.
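
The caller-side change boils down to handing the page itself to the RPC
operation instead of a kmap()ed address. Simplified sketch of the readdir
filler (argument names 'cookie', 'dtsize' and 'plus' are shorthand for the
actual expressions in the hunk below):

	/* Old: page pinned in a kmap() across the RPC call. */
	buffer = kmap(page);
	error = NFS_PROTO(inode)->readdir(inode, cred, cookie, buffer, dtsize, plus);
	...
	kunmap(page);

	/* New: the transport copies straight into the page. */
	error = NFS_PROTO(inode)->readdir(inode, cred, cookie, page, dtsize, plus);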

fs/nfs/read.c
   Adapt to new format ->read() and struct nfs_readargs.

fs/nfs/symlink.c
   Adapt to new format ->readlink().

fs/nfs/proc.c
   Convert nfs_proc_readlink(), nfs_proc_readdir(), nfs_proc_read()

fs/nfs/nfs2xdr.c
   Convert XDR routines to transmit page information.
   Remove duplicate zeroing of pages when server returns a short read.

fs/nfs/nfs3proc.c
   Convert nfs3_proc_readlink(), nfs3_proc_readdir(), nfs3_proc_read()

fs/nfs/nfs3xdr.c
   Convert XDR routines to transmit page information.
   Remove duplicate zeroing of pages when server returns a short read.

Cheers,
  Trond
parent 9f73fdbc
......@@ -100,13 +100,12 @@ int nfs_readdir_filler(nfs_readdir_descriptor_t *desc, struct page *page)
struct file *file = desc->file;
struct inode *inode = file->f_dentry->d_inode;
struct rpc_cred *cred = nfs_file_cred(file);
void *buffer = kmap(page);
int error;
dfprintk(VFS, "NFS: nfs_readdir_filler() reading cookie %Lu into page %lu.\n", (long long)desc->entry->cookie, page->index);
again:
error = NFS_PROTO(inode)->readdir(inode, cred, desc->entry->cookie, buffer,
error = NFS_PROTO(inode)->readdir(inode, cred, desc->entry->cookie, page,
NFS_SERVER(inode)->dtsize, desc->plus);
/* We requested READDIRPLUS, but the server doesn't grok it */
if (desc->plus && error == -ENOTSUPP) {
......@@ -117,7 +116,6 @@ int nfs_readdir_filler(nfs_readdir_descriptor_t *desc, struct page *page)
if (error < 0)
goto error;
SetPageUptodate(page);
kunmap(page);
/* Ensure consistent page alignment of the data.
* Note: assumes we have exclusive access to this mapping either
* throught inode->i_sem or some other mechanism.
......@@ -316,12 +314,12 @@ int uncached_readdir(nfs_readdir_descriptor_t *desc, void *dirent,
status = -ENOMEM;
goto out;
}
desc->page = page;
desc->ptr = kmap(page);
desc->error = NFS_PROTO(inode)->readdir(inode, cred, desc->target,
desc->ptr,
page,
NFS_SERVER(inode)->dtsize,
desc->plus);
desc->page = page;
desc->ptr = kmap(page);
if (desc->error >= 0) {
if ((status = dir_decode(desc)) == 0)
desc->entry->prev_cookie = desc->target;
......
......@@ -220,35 +220,20 @@ static int
nfs_xdr_readargs(struct rpc_rqst *req, u32 *p, struct nfs_readargs *args)
{
struct rpc_auth *auth = req->rq_task->tk_auth;
int buflen, replen;
unsigned int nr;
unsigned int replen;
u32 offset = (u32)args->offset;
u32 count = args->count;
p = xdr_encode_fhandle(p, args->fh);
*p++ = htonl(args->offset);
*p++ = htonl(args->count);
*p++ = htonl(args->count);
*p++ = htonl(offset);
*p++ = htonl(count);
*p++ = htonl(count);
req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
/* Get the number of buffers in the receive iovec */
nr = args->nriov;
if (nr+2 > MAX_IOVEC) {
printk(KERN_ERR "NFS: Bad number of iov's in xdr_readargs\n");
return -EINVAL;
}
/* set up reply iovec */
/* Inline the page array */
replen = (RPC_REPHDRSIZE + auth->au_rslack + NFS_readres_sz) << 2;
buflen = req->rq_rvec[0].iov_len;
req->rq_rvec[0].iov_len = replen;
/* Copy the iovec */
memcpy(req->rq_rvec + 1, args->iov, nr * sizeof(struct iovec));
req->rq_rvec[nr+1].iov_base = (u8 *) req->rq_rvec[0].iov_base + replen;
req->rq_rvec[nr+1].iov_len = buflen - replen;
req->rq_rlen = args->count + buflen;
req->rq_rnr += nr+1;
xdr_inline_pages(&req->rq_rcv_buf, replen,
args->pages, args->pgbase, count);
return 0;
}
......@@ -269,7 +254,7 @@ nfs_xdr_readres(struct rpc_rqst *req, u32 *p, struct nfs_readres *res)
hdrlen = (u8 *) p - (u8 *) iov->iov_base;
if (iov->iov_len > hdrlen) {
dprintk("NFS: READ header is short. iovec will be shifted.\n");
xdr_shift_iovec(iov, req->rq_rnr, iov->iov_len - hdrlen);
xdr_shift_buf(&req->rq_rcv_buf, iov->iov_len - hdrlen);
}
recvd = req->rq_rlen - hdrlen;
......@@ -281,7 +266,6 @@ nfs_xdr_readres(struct rpc_rqst *req, u32 *p, struct nfs_readres *res)
dprintk("RPC: readres OK count %d\n", count);
if (count < res->count) {
xdr_zero_iovec(iov+1, req->rq_rnr-2, res->count - count);
res->count = count;
res->eof = 1; /* Silly NFSv3ism which can't be helped */
} else
......@@ -376,32 +360,24 @@ nfs_xdr_readdirargs(struct rpc_rqst *req, u32 *p, struct nfs_readdirargs *args)
{
struct rpc_task *task = req->rq_task;
struct rpc_auth *auth = task->tk_auth;
u32 bufsiz = args->bufsiz;
int buflen, replen;
unsigned int replen;
u32 count = args->count;
/*
* Some servers (e.g. HP OS 9.5) seem to expect the buffer size
* to be in longwords ... check whether to convert the size.
*/
if (task->tk_client->cl_flags & NFS_CLNTF_BUFSIZE)
bufsiz = bufsiz >> 2;
count = count >> 2;
p = xdr_encode_fhandle(p, args->fh);
*p++ = htonl(args->cookie);
*p++ = htonl(bufsiz); /* see above */
*p++ = htonl(count); /* see above */
req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
/* set up reply iovec */
/* Inline the page array */
replen = (RPC_REPHDRSIZE + auth->au_rslack + NFS_readdirres_sz) << 2;
buflen = req->rq_rvec[0].iov_len;
req->rq_rvec[0].iov_len = replen;
req->rq_rvec[1].iov_base = args->buffer;
req->rq_rvec[1].iov_len = args->bufsiz;
req->rq_rvec[2].iov_base = (u8 *) req->rq_rvec[0].iov_base + replen;
req->rq_rvec[2].iov_len = buflen - replen;
req->rq_rlen = buflen + args->bufsiz;
req->rq_rnr += 2;
xdr_inline_pages(&req->rq_rcv_buf, replen, args->pages, 0, count);
return 0;
}
......@@ -413,12 +389,15 @@ nfs_xdr_readdirargs(struct rpc_rqst *req, u32 *p, struct nfs_readdirargs *args)
* from nfs_readdir for each entry.
*/
static int
nfs_xdr_readdirres(struct rpc_rqst *req, u32 *p, struct nfs_readdirres *res)
nfs_xdr_readdirres(struct rpc_rqst *req, u32 *p, void *dummy)
{
struct iovec *iov = req->rq_rvec;
int hdrlen;
int status, nr;
u32 *end, *entry, len;
struct xdr_buf *rcvbuf = &req->rq_rcv_buf;
struct iovec *iov = rcvbuf->head;
struct page **page;
int hdrlen;
int status, nr;
unsigned int len, pglen;
u32 *end, *entry;
if ((status = ntohl(*p++)))
return -nfs_stat_to_errno(status);
......@@ -426,13 +405,13 @@ nfs_xdr_readdirres(struct rpc_rqst *req, u32 *p, struct nfs_readdirres *res)
hdrlen = (u8 *) p - (u8 *) iov->iov_base;
if (iov->iov_len > hdrlen) {
dprintk("NFS: READDIR header is short. iovec will be shifted.\n");
xdr_shift_iovec(iov, req->rq_rnr, iov->iov_len - hdrlen);
xdr_shift_buf(rcvbuf, iov->iov_len - hdrlen);
}
/* Get start and end address of XDR data */
p = (u32 *) iov[1].iov_base;
end = (u32 *) ((u8 *) p + iov[1].iov_len);
pglen = rcvbuf->page_len;
page = rcvbuf->pages;
p = kmap(*page);
end = (u32 *)((char *)p + pglen);
for (nr = 0; *p++; nr++) {
entry = p - 1;
if (p + 2 > end)
......@@ -443,16 +422,21 @@ nfs_xdr_readdirres(struct rpc_rqst *req, u32 *p, struct nfs_readdirres *res)
if (len > NFS2_MAXNAMLEN) {
printk(KERN_WARNING "NFS: giant filename in readdir (len 0x%x)!\n",
len);
return -errno_NFSERR_IO;
goto err_unmap;
}
if (p + 2 > end)
goto short_pkt;
}
kunmap(*page);
return nr;
short_pkt:
kunmap(*page);
printk(KERN_NOTICE "NFS: short packet in readdir reply!\n");
entry[0] = entry[1] = 0;
return nr;
err_unmap:
kunmap(*page);
return -errno_NFSERR_IO;
}
u32 *
......@@ -538,21 +522,16 @@ nfs_xdr_diropres(struct rpc_rqst *req, u32 *p, struct nfs_diropok *res)
static int
nfs_xdr_readlinkargs(struct rpc_rqst *req, u32 *p, struct nfs_readlinkargs *args)
{
struct rpc_task *task = req->rq_task;
struct rpc_auth *auth = task->tk_auth;
int buflen, replen;
struct rpc_auth *auth = req->rq_task->tk_auth;
unsigned int replen;
u32 count = args->count - 4;
p = xdr_encode_fhandle(p, args->fh);
req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
/* Inline the page array */
replen = (RPC_REPHDRSIZE + auth->au_rslack + NFS_readlinkres_sz) << 2;
buflen = req->rq_rvec[0].iov_len;
req->rq_rvec[0].iov_len = replen;
req->rq_rvec[1].iov_base = args->buffer;
req->rq_rvec[1].iov_len = args->bufsiz;
req->rq_rvec[2].iov_base = (u8 *) req->rq_rvec[0].iov_base + replen;
req->rq_rvec[2].iov_len = buflen - replen;
req->rq_rlen = buflen + args->bufsiz;
req->rq_rnr += 2;
xdr_inline_pages(&req->rq_rcv_buf, replen, args->pages, 0, count);
return 0;
}
......@@ -560,32 +539,33 @@ nfs_xdr_readlinkargs(struct rpc_rqst *req, u32 *p, struct nfs_readlinkargs *args
* Decode READLINK reply
*/
static int
nfs_xdr_readlinkres(struct rpc_rqst *req, u32 *p, struct nfs_readlinkres *res)
nfs_xdr_readlinkres(struct rpc_rqst *req, u32 *p, void *dummy)
{
struct iovec *iov = req->rq_rvec;
u32 *strlen;
struct xdr_buf *rcvbuf = &req->rq_rcv_buf;
struct iovec *iov = rcvbuf->head;
unsigned int hdrlen;
u32 *strlen, len;
char *string;
int hdrlen;
int status;
unsigned int len;
if ((status = ntohl(*p++)))
return -nfs_stat_to_errno(status);
hdrlen = (u8 *) p - (u8 *) iov->iov_base;
if (iov->iov_len > hdrlen) {
dprintk("NFS: READLINK header is short. iovec will be shifted.\n");
xdr_shift_iovec(iov, req->rq_rnr, iov->iov_len - hdrlen);
xdr_shift_buf(rcvbuf, iov->iov_len - hdrlen);
}
strlen = (u32*)res->buffer;
strlen = (u32*)kmap(rcvbuf->pages[0]);
/* Convert length of symlink */
len = ntohl(*strlen);
if (len > res->bufsiz - 5)
len = res->bufsiz - 5;
if (len > rcvbuf->page_len)
len = rcvbuf->page_len;
*strlen = len;
/* NULL terminate the string we got */
string = (char *)(strlen + 1);
string[len] = 0;
kunmap(rcvbuf->pages[0]);
return 0;
}
......
......@@ -171,25 +171,20 @@ nfs3_proc_access(struct inode *inode, int mode, int ruid)
}
static int
nfs3_proc_readlink(struct inode *inode, void *buffer, unsigned int buflen)
nfs3_proc_readlink(struct inode *inode, struct page *page)
{
struct nfs_fattr fattr;
struct nfs3_readlinkargs args = {
fh: NFS_FH(inode),
buffer: buffer,
bufsiz: buflen
};
struct nfs3_readlinkres res = {
fattr: &fattr,
buffer: buffer,
bufsiz: buflen
count: PAGE_CACHE_SIZE,
pages: &page
};
int status;
dprintk("NFS call readlink\n");
fattr.valid = 0;
status = rpc_call(NFS_CLIENT(inode), NFS3PROC_READLINK,
&args, &res, 0);
&args, &fattr, 0);
nfs_refresh_inode(inode, &fattr);
dprintk("NFS reply readlink: %d\n", status);
return status;
......@@ -198,14 +193,16 @@ nfs3_proc_readlink(struct inode *inode, void *buffer, unsigned int buflen)
static int
nfs3_proc_read(struct inode *inode, struct rpc_cred *cred,
struct nfs_fattr *fattr, int flags,
loff_t offset, unsigned int count, void *buffer, int *eofp)
unsigned int base, unsigned int count, struct page *page,
int *eofp)
{
u64 offset = page_offset(page) + base;
struct nfs_readargs arg = {
fh: NFS_FH(inode),
offset: offset,
count: count,
nriov: 1,
iov: {{buffer, count}, {0,0}, {0,0}, {0,0}, {0,0}, {0,0}, {0,0}, {0,0}}
pgbase: base,
pages: &page
};
struct nfs_readres res = {
fattr: fattr,
......@@ -542,16 +539,22 @@ nfs3_proc_rmdir(struct inode *dir, struct qstr *name)
*/
static int
nfs3_proc_readdir(struct inode *dir, struct rpc_cred *cred,
u64 cookie, void *entry,
unsigned int size, int plus)
u64 cookie, struct page *page, unsigned int count, int plus)
{
struct nfs_fattr dir_attr;
u32 *verf = NFS_COOKIEVERF(dir);
struct nfs3_readdirargs arg = {
fh: NFS_FH(dir),
cookie: cookie,
verf: {verf[0], verf[1]},
plus: plus,
count: count,
pages: &page
};
struct nfs3_readdirres res = {
dir_attr: &dir_attr,
verf: verf,
plus: plus
};
struct rpc_message msg = {
rpc_proc: NFS3PROC_READDIR,
......@@ -559,21 +562,10 @@ nfs3_proc_readdir(struct inode *dir, struct rpc_cred *cred,
rpc_resp: &res,
rpc_cred: cred
};
u32 *verf = NFS_COOKIEVERF(dir);
int status;
lock_kernel();
arg.buffer = entry;
arg.bufsiz = size;
arg.verf[0] = verf[0];
arg.verf[1] = verf[1];
arg.plus = plus;
res.buffer = entry;
res.bufsiz = size;
res.verf = verf;
res.plus = plus;
if (plus)
msg.rpc_proc = NFS3PROC_READDIRPLUS;
......
......@@ -347,35 +347,18 @@ static int
nfs3_xdr_readargs(struct rpc_rqst *req, u32 *p, struct nfs_readargs *args)
{
struct rpc_auth *auth = req->rq_task->tk_auth;
int buflen, replen;
unsigned int nr;
unsigned int replen;
u32 count = args->count;
p = xdr_encode_fhandle(p, args->fh);
p = xdr_encode_hyper(p, args->offset);
*p++ = htonl(args->count);
*p++ = htonl(count);
req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
/* Get the number of buffers in the receive iovec */
nr = args->nriov;
if (nr+2 > MAX_IOVEC) {
printk(KERN_ERR "NFS: Bad number of iov's in xdr_readargs\n");
return -EINVAL;
}
/* set up reply iovec */
/* Inline the page array */
replen = (RPC_REPHDRSIZE + auth->au_rslack + NFS3_readres_sz) << 2;
buflen = req->rq_rvec[0].iov_len;
req->rq_rvec[0].iov_len = replen;
/* Copy the iovec */
memcpy(req->rq_rvec + 1, args->iov, nr * sizeof(struct iovec));
req->rq_rvec[nr+1].iov_base = (u8 *) req->rq_rvec[0].iov_base + replen;
req->rq_rvec[nr+1].iov_len = buflen - replen;
req->rq_rlen = args->count + buflen;
req->rq_rnr += nr+1;
xdr_inline_pages(&req->rq_rcv_buf, replen,
args->pages, args->pgbase, count);
return 0;
}
......@@ -500,7 +483,8 @@ static int
nfs3_xdr_readdirargs(struct rpc_rqst *req, u32 *p, struct nfs3_readdirargs *args)
{
struct rpc_auth *auth = req->rq_task->tk_auth;
int buflen, replen;
unsigned int replen;
u32 count = args->count;
p = xdr_encode_fhandle(p, args->fh);
p = xdr_encode_hyper(p, args->cookie);
......@@ -509,22 +493,14 @@ nfs3_xdr_readdirargs(struct rpc_rqst *req, u32 *p, struct nfs3_readdirargs *args
if (args->plus) {
/* readdirplus: need dircount + buffer size.
* We just make sure we make dircount big enough */
*p++ = htonl(args->bufsiz >> 3);
*p++ = htonl(count >> 3);
}
*p++ = htonl(args->bufsiz);
*p++ = htonl(count);
req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
/* set up reply iovec */
/* Inline the page array */
replen = (RPC_REPHDRSIZE + auth->au_rslack + NFS3_readdirres_sz) << 2;
buflen = req->rq_rvec[0].iov_len;
req->rq_rvec[0].iov_len = replen;
req->rq_rvec[1].iov_base = args->buffer;
req->rq_rvec[1].iov_len = args->bufsiz;
req->rq_rvec[2].iov_base = (u8 *) req->rq_rvec[0].iov_base + replen;
req->rq_rvec[2].iov_len = buflen - replen;
req->rq_rlen = buflen + args->bufsiz;
req->rq_rnr += 2;
xdr_inline_pages(&req->rq_rcv_buf, replen, args->pages, 0, count);
return 0;
}
......@@ -535,11 +511,13 @@ nfs3_xdr_readdirargs(struct rpc_rqst *req, u32 *p, struct nfs3_readdirargs *args
static int
nfs3_xdr_readdirres(struct rpc_rqst *req, u32 *p, struct nfs3_readdirres *res)
{
struct iovec *iov = req->rq_rvec;
int hdrlen;
int status, nr;
unsigned int len;
u32 *entry, *end;
struct xdr_buf *rcvbuf = &req->rq_rcv_buf;
struct iovec *iov = rcvbuf->head;
struct page **page;
int hdrlen;
int status, nr;
unsigned int len, pglen;
u32 *entry, *end;
status = ntohl(*p++);
/* Decode post_op_attrs */
......@@ -557,11 +535,13 @@ nfs3_xdr_readdirres(struct rpc_rqst *req, u32 *p, struct nfs3_readdirres *res)
hdrlen = (u8 *) p - (u8 *) iov->iov_base;
if (iov->iov_len > hdrlen) {
dprintk("NFS: READDIR header is short. iovec will be shifted.\n");
xdr_shift_iovec(iov, req->rq_rnr, iov->iov_len - hdrlen);
xdr_shift_buf(rcvbuf, iov->iov_len - hdrlen);
}
p = (u32 *) iov[1].iov_base;
end = (u32 *) ((u8 *) p + iov[1].iov_len);
pglen = rcvbuf->page_len;
page = rcvbuf->pages;
p = kmap(*page);
end = (u32 *)((char *)p + pglen);
for (nr = 0; *p++; nr++) {
entry = p - 1;
if (p + 3 > end)
......@@ -572,7 +552,7 @@ nfs3_xdr_readdirres(struct rpc_rqst *req, u32 *p, struct nfs3_readdirres *res)
if (len > NFS3_MAXNAMLEN) {
printk(KERN_WARNING "NFS: giant filename in readdir (len %x)!\n",
len);
return -errno_NFSERR_IO;
goto err_unmap;
}
if (res->plus) {
......@@ -592,7 +572,7 @@ nfs3_xdr_readdirres(struct rpc_rqst *req, u32 *p, struct nfs3_readdirres *res)
if (len > NFS3_FHSIZE) {
printk(KERN_WARNING "NFS: giant filehandle in "
"readdir (len %x)!\n", len);
return -errno_NFSERR_IO;
goto err_unmap;
}
p += XDR_QUADLEN(len);
}
......@@ -601,13 +581,17 @@ nfs3_xdr_readdirres(struct rpc_rqst *req, u32 *p, struct nfs3_readdirres *res)
if (p + 2 > end)
goto short_pkt;
}
kunmap(*page);
return nr;
short_pkt:
kunmap(*page);
printk(KERN_NOTICE "NFS: short packet in readdir reply!\n");
/* truncate listing */
entry[0] = entry[1] = 0;
return nr;
err_unmap:
kunmap(*page);
return -errno_NFSERR_IO;
}
u32 *
......@@ -742,21 +726,16 @@ nfs3_xdr_accessres(struct rpc_rqst *req, u32 *p, struct nfs3_accessres *res)
static int
nfs3_xdr_readlinkargs(struct rpc_rqst *req, u32 *p, struct nfs3_readlinkargs *args)
{
struct rpc_task *task = req->rq_task;
struct rpc_auth *auth = task->tk_auth;
int buflen, replen;
struct rpc_auth *auth = req->rq_task->tk_auth;
unsigned int replen;
u32 count = args->count - 4;
p = xdr_encode_fhandle(p, args->fh);
req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
/* Inline the page array */
replen = (RPC_REPHDRSIZE + auth->au_rslack + NFS3_readlinkres_sz) << 2;
buflen = req->rq_rvec[0].iov_len;
req->rq_rvec[0].iov_len = replen;
req->rq_rvec[1].iov_base = args->buffer;
req->rq_rvec[1].iov_len = args->bufsiz;
req->rq_rvec[2].iov_base = (u8 *) req->rq_rvec[0].iov_base + replen;
req->rq_rvec[2].iov_len = buflen - replen;
req->rq_rlen = buflen + args->bufsiz;
req->rq_rnr += 2;
xdr_inline_pages(&req->rq_rcv_buf, replen, args->pages, 0, count);
return 0;
}
......@@ -764,17 +743,17 @@ nfs3_xdr_readlinkargs(struct rpc_rqst *req, u32 *p, struct nfs3_readlinkargs *ar
* Decode READLINK reply
*/
static int
nfs3_xdr_readlinkres(struct rpc_rqst *req, u32 *p, struct nfs3_readlinkres *res)
nfs3_xdr_readlinkres(struct rpc_rqst *req, u32 *p, struct nfs_fattr *fattr)
{
struct iovec *iov = req->rq_rvec;
int hdrlen;
u32 *strlen;
struct xdr_buf *rcvbuf = &req->rq_rcv_buf;
struct iovec *iov = rcvbuf->head;
unsigned int hdrlen;
u32 *strlen, len;
char *string;
int status;
unsigned int len;
status = ntohl(*p++);
p = xdr_decode_post_op_attr(p, res->fattr);
p = xdr_decode_post_op_attr(p, fattr);
if (status != 0)
return -nfs_stat_to_errno(status);
......@@ -782,18 +761,19 @@ nfs3_xdr_readlinkres(struct rpc_rqst *req, u32 *p, struct nfs3_readlinkres *res)
hdrlen = (u8 *) p - (u8 *) iov->iov_base;
if (iov->iov_len > hdrlen) {
dprintk("NFS: READLINK header is short. iovec will be shifted.\n");
xdr_shift_iovec(iov, req->rq_rnr, iov->iov_len - hdrlen);
xdr_shift_buf(rcvbuf, iov->iov_len - hdrlen);
}
strlen = (u32*)res->buffer;
strlen = (u32*)kmap(rcvbuf->pages[0]);
/* Convert length of symlink */
len = ntohl(*strlen);
if (len > res->bufsiz - 5)
len = res->bufsiz - 5;
if (len > rcvbuf->page_len)
len = rcvbuf->page_len;
*strlen = len;
/* NULL terminate the string we got */
string = (char *)(strlen + 1);
string[len] = 0;
kunmap(rcvbuf->pages[0]);
return 0;
}
......@@ -827,7 +807,7 @@ nfs3_xdr_readres(struct rpc_rqst *req, u32 *p, struct nfs_readres *res)
hdrlen = (u8 *) p - (u8 *) iov->iov_base;
if (iov->iov_len > hdrlen) {
dprintk("NFS: READ header is short. iovec will be shifted.\n");
xdr_shift_iovec(iov, req->rq_rnr, iov->iov_len - hdrlen);
xdr_shift_buf(&req->rq_rcv_buf, iov->iov_len - hdrlen);
}
recvd = req->rq_rlen - hdrlen;
......@@ -837,10 +817,8 @@ nfs3_xdr_readres(struct rpc_rqst *req, u32 *p, struct nfs_readres *res)
count = recvd;
}
if (count < res->count) {
xdr_zero_iovec(iov+1, req->rq_rnr-2, res->count - count);
if (count < res->count)
res->count = count;
}
return count;
}
......
......@@ -117,22 +117,17 @@ nfs_proc_lookup(struct inode *dir, struct qstr *name,
}
static int
nfs_proc_readlink(struct inode *inode, void *buffer, unsigned int bufsiz)
nfs_proc_readlink(struct inode *inode, struct page *page)
{
struct nfs_readlinkargs args = {
fh: NFS_FH(inode),
buffer: buffer,
bufsiz: bufsiz
};
struct nfs_readlinkres res = {
buffer: buffer,
bufsiz: bufsiz
count: PAGE_CACHE_SIZE,
pages: &page
};
int status;
dprintk("NFS call readlink\n");
status = rpc_call(NFS_CLIENT(inode), NFSPROC_READLINK,
&args, &res, 0);
status = rpc_call(NFS_CLIENT(inode), NFSPROC_READLINK, &args, NULL, 0);
dprintk("NFS reply readlink: %d\n", status);
return status;
}
......@@ -140,14 +135,16 @@ nfs_proc_readlink(struct inode *inode, void *buffer, unsigned int bufsiz)
static int
nfs_proc_read(struct inode *inode, struct rpc_cred *cred,
struct nfs_fattr *fattr, int flags,
loff_t offset, unsigned int count, void *buffer, int *eofp)
unsigned int base, unsigned int count, struct page *page,
int *eofp)
{
u64 offset = page_offset(page) + base;
struct nfs_readargs arg = {
fh: NFS_FH(inode),
offset: offset,
count: count,
nriov: 1,
iov: {{ buffer, count }, {0,0}, {0,0}, {0,0}, {0,0}, {0,0}, {0,0}, {0,0}}
pgbase: base,
pages: &page
};
struct nfs_readres res = {
fattr: fattr,
......@@ -429,28 +426,24 @@ nfs_proc_rmdir(struct inode *dir, struct qstr *name)
*/
static int
nfs_proc_readdir(struct inode *dir, struct rpc_cred *cred,
__u64 cookie, void *entry,
unsigned int size, int plus)
u64 cookie, struct page *page, unsigned int count, int plus)
{
struct nfs_readdirargs arg;
struct nfs_readdirres res;
struct nfs_readdirargs arg = {
fh: NFS_FH(dir),
cookie: cookie,
count: count,
pages: &page
};
struct rpc_message msg = {
rpc_proc: NFSPROC_READDIR,
rpc_argp: &arg,
rpc_resp: &res,
rpc_resp: NULL,
rpc_cred: cred
};
int status;
lock_kernel();
arg.fh = NFS_FH(dir);
arg.cookie = cookie;
arg.buffer = entry;
arg.bufsiz = size;
res.buffer = entry;
res.bufsiz = size;
dprintk("NFS call readdir %d\n", (unsigned int)cookie);
status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0);
......
......@@ -42,6 +42,7 @@ struct nfs_read_data {
struct nfs_readres res; /* ... and result struct */
struct nfs_fattr fattr; /* fattr storage */
struct list_head pages; /* Coalesced read requests */
struct page *pagevec[NFS_READ_MAXIOV];
};
/*
......@@ -63,6 +64,7 @@ static __inline__ struct nfs_read_data *nfs_readdata_alloc(void)
if (p) {
memset(p, 0, sizeof(*p));
INIT_LIST_HEAD(&p->pages);
p->args.pages = p->pagevec;
}
return p;
}
......@@ -86,11 +88,10 @@ nfs_readpage_sync(struct file *file, struct inode *inode, struct page *page)
{
struct rpc_cred *cred = NULL;
struct nfs_fattr fattr;
loff_t offset = page_offset(page);
char *buffer;
int rsize = NFS_SERVER(inode)->rsize;
unsigned int offset = 0;
unsigned int rsize = NFS_SERVER(inode)->rsize;
unsigned int count = PAGE_CACHE_SIZE;
int result;
int count = PAGE_CACHE_SIZE;
int flags = IS_SWAPFILE(inode)? NFS_RPC_SWAPFLAGS : 0;
int eof;
......@@ -103,20 +104,19 @@ nfs_readpage_sync(struct file *file, struct inode *inode, struct page *page)
* This works now because the socket layer never tries to DMA
* into this buffer directly.
*/
buffer = kmap(page);
do {
if (count < rsize)
rsize = count;
dprintk("NFS: nfs_proc_read(%s, (%s/%Ld), %Ld, %d, %p)\n",
dprintk("NFS: nfs_proc_read(%s, (%s/%Ld), %Lu, %u)\n",
NFS_SERVER(inode)->hostname,
inode->i_sb->s_id,
(long long)NFS_FILEID(inode),
(long long)offset, rsize, buffer);
(unsigned long long)offset, rsize);
lock_kernel();
result = NFS_PROTO(inode)->read(inode, cred, &fattr, flags,
offset, rsize, buffer, &eof);
offset, rsize, page, &eof);
nfs_refresh_inode(inode, &fattr);
unlock_kernel();
......@@ -131,12 +131,15 @@ nfs_readpage_sync(struct file *file, struct inode *inode, struct page *page)
}
count -= result;
offset += result;
buffer += result;
if (result < rsize) /* NFSv2ism */
break;
} while (count);
memset(buffer, 0, count);
if (count) {
char *kaddr = kmap(page);
memset(kaddr + offset, 0, count);
kunmap(page);
}
flush_dcache_page(page);
SetPageUptodate(page);
if (PageError(page))
......@@ -144,7 +147,6 @@ nfs_readpage_sync(struct file *file, struct inode *inode, struct page *page)
result = 0;
io_error:
kunmap(page);
unlock_page(page);
return result;
}
......@@ -189,26 +191,24 @@ static void
nfs_read_rpcsetup(struct list_head *head, struct nfs_read_data *data)
{
struct nfs_page *req;
struct iovec *iov;
struct page **pages;
unsigned int count;
iov = data->args.iov;
pages = data->args.pages;
count = 0;
while (!list_empty(head)) {
struct nfs_page *req = nfs_list_entry(head->next);
nfs_list_remove_request(req);
nfs_list_add_request(req, &data->pages);
iov->iov_base = kmap(req->wb_page) + req->wb_offset;
iov->iov_len = req->wb_bytes;
*pages++ = req->wb_page;
count += req->wb_bytes;
iov++;
data->args.nriov++;
}
req = nfs_list_entry(data->pages.next);
data->inode = req->wb_inode;
data->cred = req->wb_cred;
data->args.fh = NFS_FH(req->wb_inode);
data->args.offset = page_offset(req->wb_page) + req->wb_offset;
data->args.pgbase = req->wb_offset;
data->args.count = count;
data->res.fattr = &data->fattr;
data->res.count = count;
......@@ -269,11 +269,12 @@ nfs_pagein_one(struct list_head *head, struct inode *inode)
msg.rpc_cred = data->cred;
/* Start the async call */
dprintk("NFS: %4d initiated read call (req %s/%Ld count %d nriov %d.\n",
dprintk("NFS: %4d initiated read call (req %s/%Ld, %u bytes @ offset %Lu.\n",
task->tk_pid,
inode->i_sb->s_id,
(long long)NFS_FILEID(inode),
data->args.count, data->args.nriov);
(unsigned int)data->args.count,
(unsigned long long)data->args.offset);
rpc_clnt_sigmask(clnt, &oldset);
rpc_call_setup(task, &msg, 0);
......@@ -429,7 +430,6 @@ nfs_readpage_result(struct rpc_task *task)
} else
SetPageError(page);
flush_dcache_page(page);
kunmap(page);
unlock_page(page);
dprintk("NFS: read (%s/%Ld %d@%Ld)\n",
......
......@@ -29,7 +29,6 @@
*/
static int nfs_symlink_filler(struct inode *inode, struct page *page)
{
void *buffer = kmap(page);
int error;
/* We place the length at the beginning of the page,
......@@ -37,19 +36,16 @@ static int nfs_symlink_filler(struct inode *inode, struct page *page)
* XDR response verification will NULL terminate it.
*/
lock_kernel();
error = NFS_PROTO(inode)->readlink(inode, buffer,
PAGE_CACHE_SIZE - sizeof(u32)-4);
error = NFS_PROTO(inode)->readlink(inode, page);
unlock_kernel();
if (error < 0)
goto error;
SetPageUptodate(page);
kunmap(page);
unlock_page(page);
return 0;
error:
SetPageError(page);
kunmap(page);
unlock_page(page);
return -EIO;
}
......
......@@ -68,8 +68,8 @@ struct nfs_readargs {
struct nfs_fh * fh;
__u64 offset;
__u32 count;
unsigned int nriov;
struct iovec iov[NFS_READ_MAXIOV];
unsigned int pgbase;
struct page ** pages;
};
struct nfs_readres {
......@@ -165,8 +165,8 @@ struct nfs_symlinkargs {
struct nfs_readdirargs {
struct nfs_fh * fh;
__u32 cookie;
void * buffer;
unsigned int bufsiz;
unsigned int count;
struct page ** pages;
};
struct nfs_diropok {
......@@ -176,18 +176,8 @@ struct nfs_diropok {
struct nfs_readlinkargs {
struct nfs_fh * fh;
void * buffer;
unsigned int bufsiz;
};
struct nfs_readlinkres {
void * buffer;
unsigned int bufsiz;
};
struct nfs_readdirres {
void * buffer;
unsigned int bufsiz;
unsigned int count;
struct page ** pages;
};
struct nfs3_sattrargs {
......@@ -262,9 +252,9 @@ struct nfs3_readdirargs {
struct nfs_fh * fh;
__u64 cookie;
__u32 verf[2];
void * buffer;
unsigned int bufsiz;
int plus;
unsigned int count;
struct page ** pages;
};
struct nfs3_diropres {
......@@ -280,14 +270,8 @@ struct nfs3_accessres {
struct nfs3_readlinkargs {
struct nfs_fh * fh;
void * buffer;
unsigned int bufsiz;
};
struct nfs3_readlinkres {
struct nfs_fattr * fattr;
void * buffer;
unsigned int bufsiz;
unsigned int count;
struct page ** pages;
};
struct nfs3_renameres {
......@@ -303,8 +287,6 @@ struct nfs3_linkres {
struct nfs3_readdirres {
struct nfs_fattr * dir_attr;
__u32 * verf;
void * buffer;
unsigned int bufsiz;
int plus;
};
......@@ -322,11 +304,11 @@ struct nfs_rpc_ops {
int (*lookup) (struct inode *, struct qstr *,
struct nfs_fh *, struct nfs_fattr *);
int (*access) (struct inode *, int , int);
int (*readlink)(struct inode *, void *, unsigned int);
int (*readlink)(struct inode *, struct page *);
int (*read) (struct inode *, struct rpc_cred *,
struct nfs_fattr *,
int, loff_t, unsigned int,
void *buffer, int *eofp);
int, unsigned int, unsigned int,
struct page *, int *eofp);
int (*write) (struct inode *, struct rpc_cred *,
struct nfs_fattr *,
int, unsigned int, unsigned int,
......@@ -349,7 +331,7 @@ struct nfs_rpc_ops {
struct nfs_fh *, struct nfs_fattr *);
int (*rmdir) (struct inode *, struct qstr *);
int (*readdir) (struct inode *, struct rpc_cred *,
u64, void *, unsigned int, int);
u64, struct page *, unsigned int, int);
int (*mknod) (struct inode *, struct qstr *, struct iattr *,
dev_t, struct nfs_fh *, struct nfs_fattr *);
int (*statfs) (struct nfs_server *, struct nfs_fh *,
......
......@@ -95,6 +95,8 @@ u32 * xdr_decode_netobj_fixed(u32 *p, void *obj, unsigned int len);
void xdr_encode_pages(struct xdr_buf *, struct page **, unsigned int,
unsigned int);
void xdr_inline_pages(struct xdr_buf *, unsigned int,
struct page **, unsigned int, unsigned int);
/*
* Decode 64bit quantities (NFSv3 support)
......@@ -127,11 +129,39 @@ xdr_adjust_iovec(struct iovec *iov, u32 *p)
void xdr_shift_iovec(struct iovec *, int, size_t);
void xdr_zero_iovec(struct iovec *, int, size_t);
/*
* Maximum number of iov's we use.
*/
#define MAX_IOVEC (12)
/*
* XDR buffer helper functions
*/
extern int xdr_kmap(struct iovec *, struct xdr_buf *, unsigned int);
extern void xdr_kunmap(struct xdr_buf *, unsigned int);
extern void xdr_shift_buf(struct xdr_buf *, unsigned int);
extern void xdr_zero_buf(struct xdr_buf *, unsigned int);
/*
* Helper structure for copying from an sk_buff.
*/
typedef struct {
struct sk_buff *skb;
unsigned int offset;
size_t count;
unsigned int csum;
} skb_reader_t;
typedef size_t (*skb_read_actor_t)(skb_reader_t *desc, void *to, size_t len);
extern void xdr_partial_copy_from_skb(struct xdr_buf *, unsigned int,
skb_reader_t *, skb_read_actor_t);
extern int xdr_copy_skb(struct xdr_buf *xdr, unsigned int base,
struct sk_buff *skb, unsigned int offset);
extern int xdr_copy_and_csum_skb(struct xdr_buf *xdr, unsigned int base,
struct sk_buff *skb, unsigned int offset, unsigned int csum);
#endif /* __KERNEL__ */
......
......@@ -15,11 +15,6 @@
#include <linux/sunrpc/sched.h>
#include <linux/sunrpc/xdr.h>
/*
* Maximum number of iov's we use.
*/
#define MAX_IOVEC 10
/*
* The transport code maintains an estimate on the maximum number of out-
* standing RPC requests, using a smoothed version of the congestion
......@@ -89,7 +84,7 @@ struct rpc_rqst {
struct rpc_xprt * rq_xprt; /* RPC client */
struct rpc_timeout rq_timeout; /* timeout parms */
struct xdr_buf rq_snd_buf; /* send buffer */
struct rpc_iov rq_rcv_buf; /* recv buffer */
struct xdr_buf rq_rcv_buf; /* recv buffer */
/*
* This is the private part
......@@ -116,9 +111,8 @@ struct rpc_rqst {
};
#define rq_svec rq_snd_buf.head
#define rq_slen rq_snd_buf.len
#define rq_rvec rq_rcv_buf.io_vec
#define rq_rnr rq_rcv_buf.io_nr
#define rq_rlen rq_rcv_buf.io_len
#define rq_rvec rq_rcv_buf.head
#define rq_rlen rq_rcv_buf.len
#define XPRT_LAST_FRAG (1 << 0)
#define XPRT_COPY_RECM (1 << 1)
......
......@@ -462,6 +462,7 @@ call_encode(struct rpc_task *task)
struct rpc_clnt *clnt = task->tk_client;
struct rpc_rqst *req = task->tk_rqstp;
struct xdr_buf *sndbuf = &req->rq_snd_buf;
struct xdr_buf *rcvbuf = &req->rq_rcv_buf;
unsigned int bufsiz;
kxdrproc_t encode;
int status;
......@@ -479,10 +480,11 @@ call_encode(struct rpc_task *task)
sndbuf->tail[0].iov_len = 0;
sndbuf->page_len = 0;
sndbuf->len = 0;
req->rq_rvec[0].iov_base = (void *)((char *)task->tk_buffer + bufsiz);
req->rq_rvec[0].iov_len = bufsiz;
req->rq_rlen = bufsiz;
req->rq_rnr = 1;
rcvbuf->head[0].iov_base = (void *)((char *)task->tk_buffer + bufsiz);
rcvbuf->head[0].iov_len = bufsiz;
rcvbuf->tail[0].iov_len = 0;
rcvbuf->page_len = 0;
rcvbuf->len = bufsiz;
/* Zero buffer so we have automatic zero-padding of opaque & string */
memset(task->tk_buffer, 0, bufsiz);
......
......@@ -97,8 +97,8 @@ EXPORT_SYMBOL(xdr_decode_string_inplace);
EXPORT_SYMBOL(xdr_decode_netobj);
EXPORT_SYMBOL(xdr_encode_netobj);
EXPORT_SYMBOL(xdr_encode_pages);
EXPORT_SYMBOL(xdr_shift_iovec);
EXPORT_SYMBOL(xdr_zero_iovec);
EXPORT_SYMBOL(xdr_inline_pages);
EXPORT_SYMBOL(xdr_shift_buf);
/* Debugging symbols */
#ifdef RPC_DEBUG
......
......@@ -120,6 +120,27 @@ xdr_encode_pages(struct xdr_buf *xdr, struct page **pages, unsigned int base,
xdr->len += len;
}
void
xdr_inline_pages(struct xdr_buf *xdr, unsigned int offset,
struct page **pages, unsigned int base, unsigned int len)
{
struct iovec *head = xdr->head;
struct iovec *tail = xdr->tail;
char *buf = (char *)head->iov_base;
unsigned int buflen = head->iov_len;
head->iov_len = offset;
xdr->pages = pages;
xdr->page_base = base;
xdr->page_len = len;
tail->iov_base = buf + offset;
tail->iov_len = buflen - offset;
xdr->len += len;
}
/*
* Realign the iovec if the server missed out some reply elements
......@@ -251,3 +272,72 @@ void xdr_kunmap(struct xdr_buf *xdr, unsigned int base)
ppage++;
}
}
void
xdr_partial_copy_from_skb(struct xdr_buf *xdr, unsigned int base,
skb_reader_t *desc,
skb_read_actor_t copy_actor)
{
struct page **ppage = xdr->pages;
unsigned int len, pglen = xdr->page_len;
int ret;
len = xdr->head[0].iov_len;
if (base < len) {
len -= base;
ret = copy_actor(desc, (char *)xdr->head[0].iov_base + base, len);
if (ret != len || !desc->count)
return;
base = 0;
} else
base -= len;
if (pglen == 0)
goto copy_tail;
if (base >= pglen) {
base -= pglen;
goto copy_tail;
}
if (base || xdr->page_base) {
pglen -= base;
base += xdr->page_base;
ppage += base >> PAGE_CACHE_SHIFT;
base &= ~PAGE_CACHE_MASK;
}
do {
char *kaddr;
len = PAGE_CACHE_SIZE;
kaddr = kmap_atomic(*ppage, KM_USER0);
if (base) {
len -= base;
if (pglen < len)
len = pglen;
ret = copy_actor(desc, kaddr + base, len);
base = 0;
} else {
if (pglen < len)
len = pglen;
ret = copy_actor(desc, kaddr, len);
}
kunmap_atomic(kaddr, KM_USER0);
if (ret != len || !desc->count)
return;
ppage++;
} while ((pglen -= len) != 0);
copy_tail:
len = xdr->tail[0].iov_len;
if (len)
copy_actor(desc, (char *)xdr->tail[0].iov_base + base, len);
}
void
xdr_shift_buf(struct xdr_buf *xdr, size_t len)
{
struct iovec iov[MAX_IOVEC];
unsigned int nr;
nr = xdr_kmap(iov, xdr, 0);
xdr_shift_iovec(iov, nr, len);
xdr_kunmap(xdr, 0);
}
......@@ -557,58 +557,60 @@ xprt_complete_rqst(struct rpc_xprt *xprt, struct rpc_rqst *req, int copied)
return;
}
static size_t
skb_read_bits(skb_reader_t *desc, void *to, size_t len)
{
if (len > desc->count)
len = desc->count;
skb_copy_bits(desc->skb, desc->offset, to, len);
desc->count -= len;
desc->offset += len;
return len;
}
static size_t
skb_read_and_csum_bits(skb_reader_t *desc, void *to, size_t len)
{
unsigned int csum2, pos;
if (len > desc->count)
len = desc->count;
pos = desc->offset;
csum2 = skb_copy_and_csum_bits(desc->skb, pos, to, len, 0);
desc->csum = csum_block_add(desc->csum, csum2, pos);
desc->count -= len;
desc->offset += len;
return len;
}
/*
* We have set things up such that we perform the checksum of the UDP
* packet in parallel with the copies into the RPC client iovec. -DaveM
*/
static int csum_partial_copy_to_page_cache(struct iovec *iov,
struct sk_buff *skb,
int copied)
static int
csum_partial_copy_to_xdr(struct xdr_buf *xdr, struct sk_buff *skb)
{
int offset = sizeof(struct udphdr);
__u8 *cur_ptr = iov->iov_base;
__kernel_size_t cur_len = iov->iov_len;
unsigned int csum = skb->csum;
int need_csum = (skb->ip_summed != CHECKSUM_UNNECESSARY);
int slack = skb->len - copied - sizeof(struct udphdr);
if (need_csum)
csum = csum_partial(skb->data, sizeof(struct udphdr), csum);
while (copied > 0) {
if (cur_len) {
int to_move = cur_len;
if (to_move > copied)
to_move = copied;
if (need_csum) {
unsigned int csum2;
csum2 = skb_copy_and_csum_bits(skb, offset,
cur_ptr,
to_move, 0);
csum = csum_block_add(csum, csum2, offset);
} else
skb_copy_bits(skb, offset, cur_ptr, to_move);
offset += to_move;
copied -= to_move;
cur_ptr += to_move;
cur_len -= to_move;
}
if (cur_len <= 0) {
iov++;
cur_len = iov->iov_len;
cur_ptr = iov->iov_base;
}
}
if (need_csum) {
if (slack > 0) {
unsigned int csum2;
skb_reader_t desc;
csum2 = skb_checksum(skb, offset, slack, 0);
csum = csum_block_add(csum, csum2, offset);
}
if ((unsigned short)csum_fold(csum))
return -1;
desc.skb = skb;
desc.offset = sizeof(struct udphdr);
desc.count = skb->len - desc.offset;
if (skb->ip_summed == CHECKSUM_UNNECESSARY)
goto no_checksum;
desc.csum = csum_partial(skb->data, desc.offset, skb->csum);
xdr_partial_copy_from_skb(xdr, 0, &desc, skb_read_and_csum_bits);
if (desc.offset != skb->len) {
unsigned int csum2;
csum2 = skb_checksum(skb, desc.offset, skb->len - desc.offset, 0);
desc.csum = csum_block_add(desc.csum, csum2, desc.offset);
}
if ((unsigned short)csum_fold(desc.csum))
return -1;
return 0;
no_checksum:
xdr_partial_copy_from_skb(xdr, 0, &desc, skb_read_bits);
return 0;
}
......@@ -659,7 +661,7 @@ udp_data_ready(struct sock *sk, int len)
copied = repsize;
/* Suck it into the iovec, verify checksum if not done by hw. */
if (csum_partial_copy_to_page_cache(rovr->rq_rvec, skb, copied))
if (csum_partial_copy_to_xdr(&rovr->rq_rcv_buf, skb))
goto out_unlock;
/* Something worked... */
......@@ -677,12 +679,6 @@ udp_data_ready(struct sock *sk, int len)
wake_up_interruptible(sk->sleep);
}
typedef struct {
struct sk_buff *skb;
unsigned offset;
size_t count;
} skb_reader_t;
/*
* Copy from an skb into memory and shrink the skb.
*/
......@@ -773,11 +769,8 @@ static inline void
tcp_read_request(struct rpc_xprt *xprt, skb_reader_t *desc)
{
struct rpc_rqst *req;
struct iovec *iov;
char *p;
unsigned long skip;
size_t len, used;
int n;
struct xdr_buf *rcvbuf;
size_t len;
/* Find and lock the request corresponding to this xid */
req = xprt_lookup_rqst(xprt, xprt->tcp_xid);
......@@ -787,36 +780,30 @@ tcp_read_request(struct rpc_xprt *xprt, skb_reader_t *desc)
xprt->tcp_xid);
return;
}
skip = xprt->tcp_copied;
iov = req->rq_rvec;
for (n = req->rq_rnr; n != 0; n--, iov++) {
if (skip >= iov->iov_len) {
skip -= iov->iov_len;
continue;
}
p = iov->iov_base;
len = iov->iov_len;
if (skip) {
p += skip;
len -= skip;
skip = 0;
}
if (xprt->tcp_offset + len > xprt->tcp_reclen)
len = xprt->tcp_reclen - xprt->tcp_offset;
used = tcp_copy_data(desc, p, len);
xprt->tcp_copied += used;
xprt->tcp_offset += used;
if (used != len)
break;
if (xprt->tcp_copied == req->rq_rlen) {
rcvbuf = &req->rq_rcv_buf;
len = desc->count;
if (len > xprt->tcp_reclen - xprt->tcp_offset) {
skb_reader_t my_desc;
len = xprt->tcp_reclen - xprt->tcp_offset;
memcpy(&my_desc, desc, sizeof(my_desc));
my_desc.count = len;
xdr_partial_copy_from_skb(rcvbuf, xprt->tcp_copied,
&my_desc, tcp_copy_data);
desc->count -= len;
desc->offset += len;
} else
xdr_partial_copy_from_skb(rcvbuf, xprt->tcp_copied,
desc, tcp_copy_data);
xprt->tcp_copied += len;
xprt->tcp_offset += len;
if (xprt->tcp_copied == req->rq_rlen)
xprt->tcp_flags &= ~XPRT_COPY_DATA;
else if (xprt->tcp_offset == xprt->tcp_reclen) {
if (xprt->tcp_flags & XPRT_LAST_FRAG)
xprt->tcp_flags &= ~XPRT_COPY_DATA;
break;
}
if (xprt->tcp_offset == xprt->tcp_reclen) {
if (xprt->tcp_flags & XPRT_LAST_FRAG)
xprt->tcp_flags &= ~XPRT_COPY_DATA;
break;
}
}
if (!(xprt->tcp_flags & XPRT_COPY_DATA)) {
......