Commit cbfe51cb authored by Linus Torvalds

Merge clashes between the req_offset() changes and the XDR cleanups

parents e7e4d66f 0607be17
@@ -208,7 +208,7 @@ int find_dirent_page(nfs_readdir_descriptor_t *desc)
/* NOTE: Someone else may have changed the READDIRPLUS flag */
desc->page = page;
desc->ptr = kmap(page);
desc->ptr = kmap(page); /* matching kunmap in nfs_do_filldir */
status = find_dirent(desc, page);
if (status < 0)
dir_page_release(desc);
@@ -345,7 +345,7 @@ int uncached_readdir(nfs_readdir_descriptor_t *desc, void *dirent,
NFS_SERVER(inode)->dtsize,
desc->plus);
desc->page = page;
desc->ptr = kmap(page);
desc->ptr = kmap(page); /* matching kunmap in nfs_do_filldir */
if (desc->error >= 0) {
if ((status = dir_decode(desc)) == 0)
desc->entry->prev_cookie = desc->target;
@@ -717,9 +717,9 @@ int nfs_cached_lookup(struct inode *dir, struct dentry *dentry,
res = -EIO;
if (PageUptodate(page)) {
desc.ptr = kmap(page);
desc.ptr = kmap_atomic(page, KM_USER0);
res = find_dirent_name(&desc, page, dentry);
kunmap(page);
kunmap_atomic(desc.ptr, KM_USER0);
}
page_cache_release(page);
......
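The nfs_cached_lookup() hunk above is the first of many conversions in this merge from kmap()/kunmap() to the atomic per-CPU mappings. A minimal sketch of the pattern being adopted, assuming the two-argument kmap_atomic() of this kernel generation; with_page_mapped() and the scan callback are hypothetical and only illustrate the shape:

#include <linux/highmem.h>
#include <linux/mm.h>

static int with_page_mapped(struct page *page,
			    int (*scan)(void *kaddr, void *arg), void *arg)
{
	void *kaddr;
	int res;

	kaddr = kmap_atomic(page, KM_USER0);	/* no sleeping until the unmap */
	res = scan(kaddr, arg);			/* must not block or take faults */
	kunmap_atomic(kaddr, KM_USER0);		/* pass back the mapped address */
	return res;
}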
@@ -168,6 +168,7 @@ static int nfs_commit_write(struct file *file, struct page *page, unsigned offse
struct address_space_operations nfs_file_aops = {
.readpage = nfs_readpage,
.readpages = nfs_readpages,
.set_page_dirty = __set_page_dirty_nobuffers,
.writepage = nfs_writepage,
.writepages = nfs_writepages,
.prepare_write = nfs_prepare_write,
......
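The single added line above gives the NFS address_space its own set_page_dirty method. A hedged sketch of the idea, not taken from the patch: NFS pages carry no buffer_heads, so the buffer-based default dirtying path is the wrong tool, and __set_page_dirty_nobuffers() simply marks the page and its inode dirty without touching buffers. example_aops below is illustrative only:

#include <linux/fs.h>
#include <linux/mm.h>

static struct address_space_operations example_aops = {
	/* dirty the page in the page cache, not per-page buffers */
	.set_page_dirty	= __set_page_dirty_nobuffers,
};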
@@ -378,7 +378,7 @@ nfs_xdr_readdirres(struct rpc_rqst *req, u32 *p, void *dummy)
int hdrlen, recvd;
int status, nr;
unsigned int len, pglen;
u32 *end, *entry;
u32 *end, *entry, *kaddr;
if ((status = ntohl(*p++)))
return -nfs_stat_to_errno(status);
@@ -398,7 +398,7 @@ nfs_xdr_readdirres(struct rpc_rqst *req, u32 *p, void *dummy)
if (pglen > recvd)
pglen = recvd;
page = rcvbuf->pages;
p = kmap(*page);
kaddr = p = (u32 *)kmap_atomic(*page, KM_USER0);
end = (u32 *)((char *)p + pglen);
entry = p;
for (nr = 0; *p++; nr++) {
@@ -419,7 +419,7 @@ nfs_xdr_readdirres(struct rpc_rqst *req, u32 *p, void *dummy)
if (!nr && (entry[0] != 0 || entry[1] == 0))
goto short_pkt;
out:
kunmap(*page);
kunmap_atomic(kaddr, KM_USER0);
return nr;
short_pkt:
entry[0] = entry[1] = 0;
@@ -430,8 +430,8 @@ nfs_xdr_readdirres(struct rpc_rqst *req, u32 *p, void *dummy)
}
goto out;
err_unmap:
kunmap(*page);
return -errno_NFSERR_IO;
nr = -errno_NFSERR_IO;
goto out;
}
u32 *
@@ -542,7 +542,7 @@ nfs_xdr_readlinkres(struct rpc_rqst *req, u32 *p, void *dummy)
xdr_shift_buf(rcvbuf, iov->iov_len - hdrlen);
}
strlen = (u32*)kmap(rcvbuf->pages[0]);
strlen = (u32*)kmap_atomic(rcvbuf->pages[0], KM_USER0);
/* Convert length of symlink */
len = ntohl(*strlen);
if (len > rcvbuf->page_len)
@@ -551,7 +551,7 @@ nfs_xdr_readlinkres(struct rpc_rqst *req, u32 *p, void *dummy)
/* NULL terminate the string we got */
string = (char *)(strlen + 1);
string[len] = 0;
kunmap(rcvbuf->pages[0]);
kunmap_atomic(strlen, KM_USER0);
return 0;
}
......
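Both readdir error paths in this file (and the NFSv3 decoder later in the diff) get the same treatment: instead of calling kunmap() and returning, the err_unmap label now records the error in nr and jumps to out:, so the atomic unmap happens in exactly one place. A compact standalone sketch of that single-exit shape; the entry layout and ERRVAL are stand-ins, not the real XDR format:

#include <linux/highmem.h>
#include <linux/types.h>

#define ERRVAL	(-5)	/* stand-in for -errno_NFSERR_IO */

static int decode_entries(struct page *page, unsigned int pglen)
{
	u32 *p, *end, *kaddr;
	int nr;

	kaddr = p = (u32 *)kmap_atomic(page, KM_USER0);
	end = (u32 *)((char *)p + pglen);

	for (nr = 0; p < end && *p != 0; nr++) {
		p += 3;			/* pretend each entry is three words */
		if (p > end) {		/* entry runs past the mapped data */
			nr = ERRVAL;	/* record the error ...                */
			goto out;	/* ... but still fall through the unmap */
		}
	}
out:
	kunmap_atomic(kaddr, KM_USER0);	/* one unmap covers every exit */
	return nr;
}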
@@ -730,7 +730,7 @@ nfs3_proc_read_setup(struct nfs_read_data *data, unsigned int count)
req = nfs_list_entry(data->pages.next);
data->args.fh = NFS_FH(inode);
data->args.offset = req_offset(req) + req->wb_offset;
data->args.offset = req_offset(req);
data->args.pgbase = req->wb_offset;
data->args.pages = data->pagevec;
data->args.count = count;
@@ -787,7 +787,7 @@ nfs3_proc_write_setup(struct nfs_write_data *data, unsigned int count, int how)
req = nfs_list_entry(data->pages.next);
data->args.fh = NFS_FH(inode);
data->args.offset = req_offset(req) + req->wb_offset;
data->args.offset = req_offset(req);
data->args.pgbase = req->wb_offset;
data->args.count = count;
data->args.stable = stable;
......
@@ -488,7 +488,7 @@ nfs3_xdr_readdirres(struct rpc_rqst *req, u32 *p, struct nfs3_readdirres *res)
int hdrlen, recvd;
int status, nr;
unsigned int len, pglen;
u32 *entry, *end;
u32 *entry, *end, *kaddr;
status = ntohl(*p++);
/* Decode post_op_attrs */
@@ -518,7 +518,7 @@ nfs3_xdr_readdirres(struct rpc_rqst *req, u32 *p, struct nfs3_readdirres *res)
if (pglen > recvd)
pglen = recvd;
page = rcvbuf->pages;
p = kmap(*page);
kaddr = p = (u32 *)kmap_atomic(*page, KM_USER0);
end = (u32 *)((char *)p + pglen);
entry = p;
for (nr = 0; *p++; nr++) {
@@ -563,7 +563,7 @@ nfs3_xdr_readdirres(struct rpc_rqst *req, u32 *p, struct nfs3_readdirres *res)
if (!nr && (entry[0] != 0 || entry[1] == 0))
goto short_pkt;
out:
kunmap(*page);
kunmap_atomic(kaddr, KM_USER0);
return nr;
short_pkt:
entry[0] = entry[1] = 0;
@@ -574,8 +574,8 @@ nfs3_xdr_readdirres(struct rpc_rqst *req, u32 *p, struct nfs3_readdirres *res)
}
goto out;
err_unmap:
kunmap(*page);
return -errno_NFSERR_IO;
nr = -errno_NFSERR_IO;
goto out;
}
u32 *
@@ -738,7 +738,7 @@ nfs3_xdr_readlinkres(struct rpc_rqst *req, u32 *p, struct nfs_fattr *fattr)
xdr_shift_buf(rcvbuf, iov->iov_len - hdrlen);
}
strlen = (u32*)kmap(rcvbuf->pages[0]);
strlen = (u32*)kmap_atomic(rcvbuf->pages[0], KM_USER0);
/* Convert length of symlink */
len = ntohl(*strlen);
if (len > rcvbuf->page_len)
@@ -747,7 +747,7 @@ nfs3_xdr_readlinkres(struct rpc_rqst *req, u32 *p, struct nfs_fattr *fattr)
/* NULL terminate the string we got */
string = (char *)(strlen + 1);
string[len] = 0;
kunmap(rcvbuf->pages[0]);
kunmap_atomic(strlen, KM_USER0);
return 0;
}
......
@@ -417,7 +417,7 @@ nfs4_setup_readdir(struct nfs4_compound *cp, u64 cookie, u32 *verifier,
* when talking to the server, we always send cookie 0
* instead of 1 or 2.
*/
start = p = (u32 *)kmap(*pages);
start = p = (u32 *)kmap_atomic(*pages, KM_USER0);
if (cookie == 0) {
*p++ = xdr_one; /* next */
@@ -445,7 +445,7 @@ nfs4_setup_readdir(struct nfs4_compound *cp, u64 cookie, u32 *verifier,
readdir->rd_pgbase = (char *)p - (char *)start;
readdir->rd_count -= readdir->rd_pgbase;
kunmap(*pages);
kunmap_atomic(start, KM_USER0);
}
static void
@@ -1371,7 +1371,7 @@ nfs4_proc_read_setup(struct nfs_read_data *data, unsigned int count)
int flags;
data->args.fh = NFS_FH(inode);
data->args.offset = req_offset(req) + req->wb_offset;
data->args.offset = req_offset(req);
data->args.pgbase = req->wb_offset;
data->args.pages = data->pagevec;
data->args.count = count;
@@ -1444,7 +1444,7 @@ nfs4_proc_write_setup(struct nfs_write_data *data, unsigned int count, int how)
stable = NFS_UNSTABLE;
data->args.fh = NFS_FH(inode);
data->args.offset = req_offset(req) + req->wb_offset;
data->args.offset = req_offset(req);
data->args.pgbase = req->wb_offset;
data->args.count = count;
data->args.stable = stable;
......
@@ -1706,7 +1706,7 @@ decode_readdir(struct xdr_stream *xdr, struct rpc_rqst *req, struct nfs4_readdir
struct page *page = *rcvbuf->pages;
struct iovec *iov = rcvbuf->head;
unsigned int nr, pglen = rcvbuf->page_len;
uint32_t *end, *entry, *p;
uint32_t *end, *entry, *p, *kaddr;
uint32_t len, attrlen, word;
int i, hdrlen, recvd, status;
@@ -1723,7 +1723,7 @@ decode_readdir(struct xdr_stream *xdr, struct rpc_rqst *req, struct nfs4_readdir
xdr_read_pages(xdr, pglen);
BUG_ON(pglen + readdir->rd_pgbase > PAGE_CACHE_SIZE);
p = (uint32_t *) kmap(page);
kaddr = p = (uint32_t *) kmap_atomic(page, KM_USER0);
end = (uint32_t *) ((char *)p + pglen + readdir->rd_pgbase);
entry = p;
for (nr = 0; *p++; nr++) {
@@ -1769,7 +1769,7 @@ decode_readdir(struct xdr_stream *xdr, struct rpc_rqst *req, struct nfs4_readdir
if (!nr && (entry[0] != 0 || entry[1] == 0))
goto short_pkt;
out:
kunmap(page);
kunmap_atomic(kaddr, KM_USER0);
return 0;
short_pkt:
entry[0] = entry[1] = 0;
@@ -1780,7 +1780,7 @@ decode_readdir(struct xdr_stream *xdr, struct rpc_rqst *req, struct nfs4_readdir
}
goto out;
err_unmap:
kunmap(page);
kunmap_atomic(kaddr, KM_USER0);
return -errno_NFSERR_IO;
}
@@ -1811,18 +1811,18 @@ decode_readlink(struct xdr_stream *xdr, struct rpc_rqst *req, struct nfs4_readli
* and null-terminate the text (the VFS expects
* null-termination).
*/
strlen = (uint32_t *) kmap(rcvbuf->pages[0]);
strlen = (uint32_t *) kmap_atomic(rcvbuf->pages[0], KM_USER0);
len = ntohl(*strlen);
if (len > PAGE_CACHE_SIZE - 5) {
printk(KERN_WARNING "nfs: server returned giant symlink!\n");
kunmap(rcvbuf->pages[0]);
kunmap_atomic(strlen, KM_USER0);
return -EIO;
}
*strlen = len;
string = (char *)(strlen + 1);
string[len] = '\0';
kunmap(rcvbuf->pages[0]);
kunmap_atomic(strlen, KM_USER0);
return 0;
}
......
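One non-obvious constant in the decode_readlink() hunk above: the PAGE_CACHE_SIZE - 5 bound exists because the first receive page has to hold the 4-byte XDR length word, the link text itself, and the NUL terminator written just below it, so the text may be at most PAGE_CACHE_SIZE - 4 - 1 bytes. Restated as a tiny helper (hypothetical, for illustration only):

#include <linux/pagemap.h>
#include <linux/types.h>

/* length word (4) + text (len) + NUL (1) must fit in one page */
static inline int symlink_text_fits(unsigned int len)
{
	return len <= PAGE_CACHE_SIZE - sizeof(u32) - 1;	/* i.e. PAGE_CACHE_SIZE - 5 */
}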
@@ -563,7 +563,7 @@ nfs_proc_read_setup(struct nfs_read_data *data, unsigned int count)
req = nfs_list_entry(data->pages.next);
data->args.fh = NFS_FH(inode);
data->args.offset = req_offset(req) + req->wb_offset;
data->args.offset = req_offset(req);
data->args.pgbase = req->wb_offset;
data->args.pages = data->pagevec;
data->args.count = count;
@@ -611,7 +611,7 @@ nfs_proc_write_setup(struct nfs_write_data *data, unsigned int count, int how)
req = nfs_list_entry(data->pages.next);
data->args.fh = NFS_FH(inode);
data->args.offset = req_offset(req) + req->wb_offset;
data->args.offset = req_offset(req);
data->args.pgbase = req->wb_offset;
data->args.count = count;
data->args.stable = NFS_FILE_SYNC;
......
@@ -117,12 +117,8 @@ nfs_readpage_sync(struct file *file, struct inode *inode, struct page *page)
break;
} while (count);
if (count) {
char *kaddr = kmap(page);
memset(kaddr + offset, 0, count);
kunmap(page);
}
flush_dcache_page(page);
if (count)
memclear_highpage_flush(page, offset, count);
SetPageUptodate(page);
if (PageError(page))
ClearPageError(page);
@@ -181,7 +177,7 @@ nfs_read_rpcsetup(struct list_head *head, struct nfs_read_data *data)
inode->i_sb->s_id,
(long long)NFS_FILEID(inode),
count,
(unsigned long long)req_offset(req) + req->wb_offset);
(unsigned long long)req_offset(req));
}
static void
@@ -270,16 +266,13 @@ nfs_readpage_result(struct rpc_task *task)
if (task->tk_status >= 0) {
if (count < PAGE_CACHE_SIZE) {
char *p = kmap(page);
if (count < req->wb_bytes)
memset(p + req->wb_offset + count, 0,
memclear_highpage_flush(page,
req->wb_offset + count,
req->wb_bytes - count);
kunmap(page);
if (data->res.eof ||
((fattr->valid & NFS_ATTR_FATTR) &&
((req_offset(req) + req->wb_offset + count) >= fattr->size)))
((req_offset(req) + count) >= fattr->size)))
SetPageUptodate(page);
else
if (count < req->wb_bytes)
@@ -291,14 +284,13 @@ nfs_readpage_result(struct rpc_task *task)
}
} else
SetPageError(page);
flush_dcache_page(page);
unlock_page(page);
dprintk("NFS: read (%s/%Ld %d@%Ld)\n",
req->wb_inode->i_sb->s_id,
(long long)NFS_FILEID(req->wb_inode),
req->wb_bytes,
(long long)(req_offset(req) + req->wb_offset));
(long long)req_offset(req));
nfs_clear_request(req);
nfs_release_request(req);
nfs_unlock_request(req);
......
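The two read-path hunks above replace an open-coded kmap()/memset()/kunmap()/flush_dcache_page() sequence with memclear_highpage_flush(), which packages the same steps behind an atomic mapping. Roughly, and assuming the generic highmem helper of this era, it amounts to:

#include <linux/highmem.h>
#include <linux/string.h>

/* Sketch of what memclear_highpage_flush(page, offset, count) does:
 * zero a partial page and keep the D-cache coherent, without sleeping. */
static inline void memclear_highpage_flush_sketch(struct page *page,
						  unsigned int offset,
						  unsigned int count)
{
	char *kaddr;

	kaddr = kmap_atomic(page, KM_USER0);
	memset(kaddr + offset, 0, count);	/* zero the requested range */
	flush_dcache_page(page);		/* visible through user mappings too */
	kunmap_atomic(kaddr, KM_USER0);
}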
@@ -750,7 +750,7 @@ nfs_write_rpcsetup(struct list_head *head, struct nfs_write_data *data, int how)
inode->i_sb->s_id,
(long long)NFS_FILEID(inode),
count,
(unsigned long long)req_offset(req) + req->wb_offset);
(unsigned long long)req_offset(req));
}
/*
@@ -884,7 +884,7 @@ nfs_writeback_done(struct rpc_task *task)
req->wb_inode->i_sb->s_id,
(long long)NFS_FILEID(req->wb_inode),
req->wb_bytes,
(long long)(req_offset(req) + req->wb_offset));
(long long)req_offset(req));
if (task->tk_status < 0) {
ClearPageUptodate(page);
@@ -940,8 +940,8 @@ nfs_commit_rpcsetup(struct list_head *head, struct nfs_write_data *data, int how
* Determine the offset range of requests in the COMMIT call.
* We rely on the fact that data->pages is an ordered list...
*/
start = req_offset(first) + first->wb_offset;
end = req_offset(last) + (last->wb_offset + last->wb_bytes);
start = req_offset(first);
end = req_offset(last) + last->wb_bytes;
len = end - start;
/* If 'len' is not a 32-bit quantity, pass '0' in the COMMIT call */
if (end >= inode->i_size || len < 0 || len > (~((u32)0) >> 1))
@@ -1011,7 +1011,7 @@ nfs_commit_done(struct rpc_task *task)
req->wb_inode->i_sb->s_id,
(long long)NFS_FILEID(req->wb_inode),
req->wb_bytes,
(long long)(req_offset(req) + req->wb_offset));
(long long)req_offset(req));
if (task->tk_status < 0) {
if (req->wb_file)
req->wb_file->f_error = task->tk_status;
......
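A short note on the nfs_commit_rpcsetup() hunk above: the COMMIT operation carries a 32-bit count on the wire, and a count of 0 asks the server to flush from the offset to the end of file, so once the [start, end) range cannot be represented as a positive 32-bit length the code passes 0 instead. The clamp, restated as an illustrative helper (not the patch's exact code):

#include <linux/types.h>

/* Reduce a byte range to a COMMIT count: 0 means "flush to end of file". */
static u32 commit_count(loff_t start, loff_t end, loff_t i_size)
{
	loff_t len = end - start;

	if (end >= i_size || len < 0 || len > (~((u32)0) >> 1))
		return 0;		/* fall back to whole-file commit */
	return (u32)len;
}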
@@ -222,7 +222,7 @@ loff_t page_offset(struct page *page)
static inline
loff_t req_offset(struct nfs_page *req)
{
return ((loff_t)req->wb_index) << PAGE_CACHE_SHIFT;
return (((loff_t)req->wb_index) << PAGE_CACHE_SHIFT) + req->wb_offset;
}
/*
......
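The two-line hunk above is the pivot of the whole merge: req_offset() now folds the intra-page offset wb_offset into the value it returns, which is why every read, write and commit setup path earlier in the diff drops its explicit "+ req->wb_offset" (the page-relative part still travels separately in args.pgbase). A worked example, assuming 4 KiB pages (PAGE_CACHE_SHIFT == 12) and a request with wb_index == 3, wb_offset == 100, wb_bytes == 512; request_byte_range() is a hypothetical helper:

#include <linux/nfs_page.h>

/*   old req_offset(req)  =  3 << 12         = 12288 (callers then added 100)
 *   new req_offset(req)  = (3 << 12) + 100  = 12388 (absolute start of the data)
 *   end of the request   = 12388 + 512      = 12900
 */
static void request_byte_range(struct nfs_page *req, loff_t *start, loff_t *end)
{
	*start = req_offset(req);			/* already includes wb_offset */
	*end   = req_offset(req) + req->wb_bytes;	/* one past the last byte */
}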
@@ -306,6 +306,7 @@ xdr_partial_copy_from_skb(struct xdr_buf *xdr, unsigned int base,
len = pglen;
ret = copy_actor(desc, kaddr, len);
}
flush_dcache_page(*ppage);
kunmap_atomic(kaddr, KM_SKB_SUNRPC_DATA);
if (ret != len || !desc->count)
return;
......
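The single line added above runs flush_dcache_page() on each receive page after the copy_actor has written RPC payload into it through an atomic kernel mapping, so the data is seen coherently through any other mapping of the page. The surrounding step has roughly this shape; the copy callback is replaced by a plain memcpy() here purely for illustration:

#include <linux/highmem.h>
#include <linux/string.h>

static void copy_into_page(struct page *page, char *src, size_t len)
{
	char *kaddr;

	kaddr = kmap_atomic(page, KM_SKB_SUNRPC_DATA);
	memcpy(kaddr, src, len);		/* stand-in for the skb copy_actor */
	flush_dcache_page(page);		/* flush before the page is read elsewhere */
	kunmap_atomic(kaddr, KM_SKB_SUNRPC_DATA);
}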