Commit 0885eacd authored by Linus Torvalds

Merge tag 'nfsd-5.19-1' of git://git.kernel.org/pub/scm/linux/kernel/git/cel/linux

Pull nfsd fixes from Chuck Lever:
 "Notable changes:

   - There is now a backup maintainer for NFSD

  Notable fixes:

   - Prevent array overruns in svc_rdma_build_writes()

   - Prevent buffer overruns when encoding NFSv3 READDIR results

   - Fix a potential UAF in nfsd_file_put()"

* tag 'nfsd-5.19-1' of git://git.kernel.org/pub/scm/linux/kernel/git/cel/linux:
  SUNRPC: Remove pointer type casts from xdr_get_next_encode_buffer()
  SUNRPC: Clean up xdr_get_next_encode_buffer()
  SUNRPC: Clean up xdr_commit_encode()
  SUNRPC: Optimize xdr_reserve_space()
  SUNRPC: Fix the calculation of xdr->end in xdr_get_next_encode_buffer()
  SUNRPC: Trap RDMA segment overflows
  NFSD: Fix potential use-after-free in nfsd_file_put()
  MAINTAINERS: reciprocal co-maintainership for file locking and nfsd
parents 90add6d4 da9e94fe
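
One of the items above, "Fix a potential UAF in nfsd_file_put()", addresses an ordering bug visible in the nfsd_file_put() hunk below: the old code dropped what could be the last reference with nfsd_file_put_noref() and only then read nf->nf_file, a field of an object that may already have been freed. Here is a minimal, self-contained sketch of that hazard; the struct, field names, and the plain (non-atomic) refcount are illustrative only and are not the kernel's nfsd_file API:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical refcounted object; not the kernel's nfsd_file. */
struct object {
	int refcount;
	int has_file;	/* stands in for the nf->nf_file test */
};

static void object_put(struct object *obj)
{
	if (--obj->refcount == 0)
		free(obj);	/* the last put frees the object */
}

int main(void)
{
	struct object *obj = calloc(1, sizeof(*obj));

	if (!obj)
		return 1;
	obj->refcount = 1;
	obj->has_file = 1;

	/*
	 * Buggy ordering (what the fix removes): drop the possibly-last
	 * reference first, then read a field of the object.
	 *
	 *	object_put(obj);
	 *	if (obj->has_file)	<- use-after-free if that put freed obj
	 *		...;
	 *
	 * Safe ordering (what the fix introduces): make the decision while
	 * a reference is still held, then drop the reference.
	 */
	if (obj->has_file) {
		object_put(obj);
		printf("schedule follow-up work\n");	/* does not touch obj */
	} else {
		object_put(obj);
	}
	return 0;
}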
@@ -7654,6 +7654,7 @@ F: include/uapi/scsi/fc/
 
 FILE LOCKING (flock() and fcntl()/lockf())
 M: Jeff Layton <jlayton@kernel.org>
+M: Chuck Lever <chuck.lever@oracle.com>
 L: linux-fsdevel@vger.kernel.org
 S: Maintained
 F: fs/fcntl.c
@@ -10746,6 +10747,7 @@ W: http://kernelnewbies.org/KernelJanitors
 
 KERNEL NFSD, SUNRPC, AND LOCKD SERVERS
 M: Chuck Lever <chuck.lever@oracle.com>
+M: Jeff Layton <jlayton@kernel.org>
 L: linux-nfs@vger.kernel.org
 S: Supported
 W: http://nfs.sourceforge.net/
......
@@ -309,11 +309,12 @@ nfsd_file_put(struct nfsd_file *nf)
 	if (test_bit(NFSD_FILE_HASHED, &nf->nf_flags) == 0) {
 		nfsd_file_flush(nf);
 		nfsd_file_put_noref(nf);
-	} else {
+	} else if (nf->nf_file) {
 		nfsd_file_put_noref(nf);
-		if (nf->nf_file)
-			nfsd_file_schedule_laundrette();
-	}
+		nfsd_file_schedule_laundrette();
+	} else
+		nfsd_file_put_noref(nf);
+
 	if (atomic_long_read(&nfsd_filecache_count) >= NFSD_FILE_LRU_LIMIT)
 		nfsd_file_gc();
 }
......
@@ -243,7 +243,7 @@ extern void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf,
 extern __be32 *xdr_reserve_space(struct xdr_stream *xdr, size_t nbytes);
 extern int xdr_reserve_space_vec(struct xdr_stream *xdr, struct kvec *vec,
 		size_t nbytes);
-extern void xdr_commit_encode(struct xdr_stream *xdr);
+extern void __xdr_commit_encode(struct xdr_stream *xdr);
 extern void xdr_truncate_encode(struct xdr_stream *xdr, size_t len);
 extern int xdr_restrict_buflen(struct xdr_stream *xdr, int newbuflen);
 extern void xdr_write_pages(struct xdr_stream *xdr, struct page **pages,
@@ -306,6 +306,20 @@ xdr_reset_scratch_buffer(struct xdr_stream *xdr)
 	xdr_set_scratch_buffer(xdr, NULL, 0);
 }
 
+/**
+ * xdr_commit_encode - Ensure all data is written to xdr->buf
+ * @xdr: pointer to xdr_stream
+ *
+ * Handle encoding across page boundaries by giving the caller a
+ * temporary location to write to, then later copying the data into
+ * place. __xdr_commit_encode() does that copying.
+ */
+static inline void xdr_commit_encode(struct xdr_stream *xdr)
+{
+	if (unlikely(xdr->scratch.iov_len))
+		__xdr_commit_encode(xdr);
+}
+
 /**
  * xdr_stream_remaining - Return the number of bytes remaining in the stream
  * @xdr: pointer to struct xdr_stream
......
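
The xdr.h hunk above turns xdr_commit_encode() into a static inline fast path that calls the out-of-line __xdr_commit_encode() only when scratch data is actually pending, so the common case costs a single test at the call site (the kernel version also marks the test unlikely() and the slow path noinline). A generic sketch of that fast-path/slow-path split, using hypothetical names rather than the SUNRPC types:

#include <stdio.h>
#include <stddef.h>

struct stream {
	size_t pending;		/* stands in for xdr->scratch.iov_len */
};

/* Out-of-line slow path: only reached when there is work to do. */
static void __stream_commit(struct stream *s)
{
	printf("copying %zu pending bytes into place\n", s->pending);
	s->pending = 0;
}

/* Inline fast path: one cheap test in the common (nothing pending) case. */
static inline void stream_commit(struct stream *s)
{
	if (s->pending)
		__stream_commit(s);
}

int main(void)
{
	struct stream s = { .pending = 0 };

	stream_commit(&s);	/* fast path: no call, nothing printed */
	s.pending = 12;
	stream_commit(&s);	/* slow path: pending data gets committed */
	return 0;
}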
@@ -919,7 +919,7 @@ void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p,
 EXPORT_SYMBOL_GPL(xdr_init_encode);
 
 /**
- * xdr_commit_encode - Ensure all data is written to buffer
+ * __xdr_commit_encode - Ensure all data is written to buffer
  * @xdr: pointer to xdr_stream
  *
  * We handle encoding across page boundaries by giving the caller a
@@ -931,26 +931,29 @@ EXPORT_SYMBOL_GPL(xdr_init_encode);
 * required at the end of encoding, or any other time when the xdr_buf
 * data might be read.
 */
-inline void xdr_commit_encode(struct xdr_stream *xdr)
+void __xdr_commit_encode(struct xdr_stream *xdr)
 {
-	int shift = xdr->scratch.iov_len;
+	size_t shift = xdr->scratch.iov_len;
 	void *page;
 
-	if (shift == 0)
-		return;
 	page = page_address(*xdr->page_ptr);
 	memcpy(xdr->scratch.iov_base, page, shift);
 	memmove(page, page + shift, (void *)xdr->p - page);
 	xdr_reset_scratch_buffer(xdr);
 }
-EXPORT_SYMBOL_GPL(xdr_commit_encode);
+EXPORT_SYMBOL_GPL(__xdr_commit_encode);
 
-static __be32 *xdr_get_next_encode_buffer(struct xdr_stream *xdr,
-		size_t nbytes)
+/*
+ * The buffer space to be reserved crosses the boundary between
+ * xdr->buf->head and xdr->buf->pages, or between two pages
+ * in xdr->buf->pages.
+ */
+static noinline __be32 *xdr_get_next_encode_buffer(struct xdr_stream *xdr,
+						   size_t nbytes)
 {
-	__be32 *p;
 	int space_left;
 	int frag1bytes, frag2bytes;
+	void *p;
 
 	if (nbytes > PAGE_SIZE)
 		goto out_overflow; /* Bigger buffers require special handling */
@@ -964,6 +967,7 @@ static __be32 *xdr_get_next_encode_buffer(struct xdr_stream *xdr,
 	xdr->buf->page_len += frag1bytes;
 	xdr->page_ptr++;
 	xdr->iov = NULL;
+
 	/*
 	 * If the last encode didn't end exactly on a page boundary, the
 	 * next one will straddle boundaries. Encode into the next
@@ -972,14 +976,19 @@ static __be32 *xdr_get_next_encode_buffer(struct xdr_stream *xdr,
 	 * space at the end of the previous buffer:
 	 */
 	xdr_set_scratch_buffer(xdr, xdr->p, frag1bytes);
-	p = page_address(*xdr->page_ptr);
+
 	/*
-	 * Note this is where the next encode will start after we've
-	 * shifted this one back:
+	 * xdr->p is where the next encode will start after
+	 * xdr_commit_encode() has shifted this one back:
 	 */
-	xdr->p = (void *)p + frag2bytes;
+	p = page_address(*xdr->page_ptr);
+	xdr->p = p + frag2bytes;
 	space_left = xdr->buf->buflen - xdr->buf->len;
-	xdr->end = (void *)p + min_t(int, space_left, PAGE_SIZE);
+	if (space_left - nbytes >= PAGE_SIZE)
+		xdr->end = p + PAGE_SIZE;
+	else
+		xdr->end = p + space_left - frag1bytes;
+
 	xdr->buf->page_len += frag2bytes;
 	xdr->buf->len += nbytes;
 	return p;
......
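
The last hunk above changes how xdr->end is computed when a reservation straddles a page boundary. As I read it, space_left counts all remaining bytes of xdr->buf, but frag1bytes of those belong to the previous page, so capping the new page's bound only at PAGE_SIZE could leave xdr->end up to frag1bytes past the real limit (the READDIR overrun mentioned in the pull message). A standalone sketch of the two calculations; PAGE_SIZE, the variable names, and the numbers here are illustrative, not taken from a real encode:

#include <stdio.h>

#define PAGE_SIZE 4096

int main(void)
{
	/* Illustrative values only. */
	int frag1bytes = 100;	/* part of the reservation on the old page */
	int nbytes     = 120;	/* total reservation size */
	int space_left = 200;	/* bytes of xdr->buf->buflen still unused */

	/* Old bound: how far into the new page callers may encode. */
	int old_end = space_left < PAGE_SIZE ? space_left : PAGE_SIZE;

	/* New bound from the patch above. */
	int new_end;
	if (space_left - nbytes >= PAGE_SIZE)
		new_end = PAGE_SIZE;
	else
		new_end = space_left - frag1bytes;

	/* Only space_left - frag1bytes bytes of buflen map onto the new page. */
	printf("usable bytes in new page: %d\n", space_left - frag1bytes);
	printf("old xdr->end offset:      %d (overruns by %d)\n",
	       old_end, old_end - (space_left - frag1bytes));
	printf("new xdr->end offset:      %d\n", new_end);
	return 0;
}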
@@ -478,10 +478,10 @@ svc_rdma_build_writes(struct svc_rdma_write_info *info,
 		unsigned int write_len;
 		u64 offset;
 
-		seg = &info->wi_chunk->ch_segments[info->wi_seg_no];
-		if (!seg)
+		if (info->wi_seg_no >= info->wi_chunk->ch_segcount)
 			goto out_overflow;
+		seg = &info->wi_chunk->ch_segments[info->wi_seg_no];
 
 		write_len = min(remaining, seg->rs_length - info->wi_seg_off);
 		if (!write_len)
 			goto out_overflow;
......
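
The svc_rdma_build_writes() hunk above replaces a check that could never fire: &info->wi_chunk->ch_segments[info->wi_seg_no] is produced by pointer arithmetic, so it does not become NULL when wi_seg_no walks off the end of the segment array; the overrun has to be caught by comparing the index against the element count before the element pointer is formed. Below is a small self-contained illustration of that pattern; the struct names and the bool-returning helper are hypothetical, not the svc_rdma types:

#include <stdio.h>
#include <stdbool.h>

/* Hypothetical stand-ins for the RDMA segment bookkeeping. */
struct segment {
	unsigned int length;
};

struct chunk {
	unsigned int segcount;
	struct segment segments[4];
};

static bool next_segment(const struct chunk *ch, unsigned int seg_no,
			 const struct segment **out)
{
	/*
	 * Wrong: &ch->segments[seg_no] is never NULL, so a NULL test
	 * cannot detect seg_no running past segcount.
	 *
	 * Right: validate the index first, then form the pointer.
	 */
	if (seg_no >= ch->segcount)
		return false;		/* caller treats this as overflow */
	*out = &ch->segments[seg_no];
	return true;
}

int main(void)
{
	struct chunk ch = {
		.segcount = 2,
		.segments = { { .length = 512 }, { .length = 256 } },
	};
	const struct segment *seg;

	for (unsigned int i = 0; i < 4; i++) {
		if (!next_segment(&ch, i, &seg)) {
			printf("segment %u: out of range\n", i);
			continue;
		}
		printf("segment %u: length %u\n", i, seg->length);
	}
	return 0;
}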