Commit a41dad90 authored by Al Viro

iov_iter: saner checks for attempt to copy to/from iterator

Instead of the "don't do it to ITER_PIPE" check, warn when ->data_source
is false on an attempt to copy from the iterator.  While we are at it,
warn when ->data_source is true on an attempt to copy to the iterator.
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
parent fc02f337
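
For readers outside the kernel tree, here is a minimal userspace sketch of the
pattern these hunks apply: a single direction flag is checked once at the top
of each copy helper, instead of enumerating iterator flavours (ITER_PIPE,
ITER_DISCARD, ...) at every call site.  The names below (toy_iter,
toy_copy_to_iter, toy_copy_from_iter) are hypothetical stand-ins, not kernel
API; in the real struct iov_iter, ->data_source is derived from the READ/WRITE
direction passed to iov_iter_init().

/* Simplified model, not the kernel's code. */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct toy_iter {
	bool data_source;	/* true: iterator supplies data (copy FROM it) */
	char *buf;
	size_t len;
};

/* Copying TO the iterator is only valid when it is a destination. */
static size_t toy_copy_to_iter(const void *addr, size_t bytes, struct toy_iter *i)
{
	if (i->data_source) {		/* mirrors WARN_ON_ONCE(i->data_source) */
		fprintf(stderr, "bogus copy to a source iterator\n");
		return 0;
	}
	if (bytes > i->len)
		bytes = i->len;
	memcpy(i->buf, addr, bytes);
	return bytes;
}

/* Copying FROM the iterator is only valid when it is a source. */
static size_t toy_copy_from_iter(void *addr, size_t bytes, struct toy_iter *i)
{
	if (!i->data_source) {		/* mirrors WARN_ON_ONCE(!i->data_source) */
		fprintf(stderr, "bogus copy from a destination iterator\n");
		return 0;
	}
	if (bytes > i->len)
		bytes = i->len;
	memcpy(addr, i->buf, bytes);
	return bytes;
}

int main(void)
{
	char backing[8] = "payload";
	struct toy_iter src = { .data_source = true, .buf = backing, .len = 8 };
	char out[8];

	printf("copied %zu bytes\n", toy_copy_from_iter(out, sizeof(out), &src));
	/* Direction mismatch: warns and copies nothing, like the patched helpers. */
	printf("copied %zu bytes\n", toy_copy_to_iter("x", 1, &src));
	return 0;
}

With one uniform check per helper, adding or removing an iterator flavour no
longer requires touching every copy routine; that is the simplification the
diff below performs.
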
@@ -520,6 +520,8 @@ static size_t csum_and_copy_to_pipe_iter(const void *addr, size_t bytes,
 size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
 {
+	if (WARN_ON_ONCE(i->data_source))
+		return 0;
 	if (unlikely(iov_iter_is_pipe(i)))
 		return copy_pipe_to_iter(addr, bytes, i);
 	if (user_backed_iter(i))
@@ -606,6 +608,8 @@ static size_t copy_mc_pipe_to_iter(const void *addr, size_t bytes,
  */
 size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
 {
+	if (WARN_ON_ONCE(i->data_source))
+		return 0;
 	if (unlikely(iov_iter_is_pipe(i)))
 		return copy_mc_pipe_to_iter(addr, bytes, i);
 	if (user_backed_iter(i))
@@ -622,10 +626,9 @@ EXPORT_SYMBOL_GPL(_copy_mc_to_iter);
 size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
 {
-	if (unlikely(iov_iter_is_pipe(i))) {
-		WARN_ON(1);
+	if (WARN_ON_ONCE(!i->data_source))
 		return 0;
-	}
 	if (user_backed_iter(i))
 		might_fault();
 	iterate_and_advance(i, bytes, base, len, off,
@@ -639,10 +642,9 @@ EXPORT_SYMBOL(_copy_from_iter);
 size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
 {
-	if (unlikely(iov_iter_is_pipe(i))) {
-		WARN_ON(1);
+	if (WARN_ON_ONCE(!i->data_source))
 		return 0;
-	}
 	iterate_and_advance(i, bytes, base, len, off,
 		__copy_from_user_inatomic_nocache(addr + off, base, len),
 		memcpy(addr + off, base, len)
@@ -671,10 +673,9 @@ EXPORT_SYMBOL(_copy_from_iter_nocache);
  */
 size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
 {
-	if (unlikely(iov_iter_is_pipe(i))) {
-		WARN_ON(1);
+	if (WARN_ON_ONCE(!i->data_source))
 		return 0;
-	}
 	iterate_and_advance(i, bytes, base, len, off,
 		__copy_from_user_flushcache(addr + off, base, len),
 		memcpy_flushcache(addr + off, base, len)
@@ -714,6 +715,8 @@ size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
 	size_t res = 0;
 	if (!page_copy_sane(page, offset, bytes))
 		return 0;
+	if (WARN_ON_ONCE(i->data_source))
+		return 0;
 	if (unlikely(iov_iter_is_pipe(i)))
 		return copy_page_to_iter_pipe(page, offset, bytes, i);
 	page += offset / PAGE_SIZE; // first subpage
@@ -811,9 +814,8 @@ size_t copy_page_from_iter_atomic(struct page *page, unsigned offset, size_t byt
 		kunmap_atomic(kaddr);
 		return 0;
 	}
-	if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
+	if (WARN_ON_ONCE(!i->data_source)) {
 		kunmap_atomic(kaddr);
-		WARN_ON(1);
 		return 0;
 	}
 	iterate_and_advance(i, bytes, base, len, off,
@@ -1525,10 +1527,9 @@ size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
 {
 	__wsum sum, next;
 	sum = *csum;
-	if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
-		WARN_ON(1);
+	if (WARN_ON_ONCE(!i->data_source))
 		return 0;
-	}
 	iterate_and_advance(i, bytes, base, len, off, ({
 		next = csum_and_copy_from_user(base, addr + off, len);
 		sum = csum_block_add(sum, next, off);
@@ -1548,6 +1549,8 @@ size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *_csstate,
 	struct csum_state *csstate = _csstate;
 	__wsum sum, next;
+	if (WARN_ON_ONCE(i->data_source))
+		return 0;
 	if (unlikely(iov_iter_is_discard(i))) {
 		// can't use csum_memcpy() for that one - data is not copied
 		csstate->csum = csum_block_add(csstate->csum,