Commit 206d4c6c authored by Al Viro, committed by Greg Kroah-Hartman

fix fault_in_multipages_...() on architectures with no-op access_ok()

commit e23d4159 upstream.

Switching iov_iter fault-in to multipages variants has exposed an old
bug in underlying fault_in_multipages_...(); they break if the range
passed to them wraps around.  Normally the access_ok() check done by
callers prevents such ranges from reaching these helpers (and a wrapped
range is a guaranteed EFAULT anyway - ERR_PTR() values fall into that
range and should never point to any valid objects).

However, on architectures where userland and kernel live in different
MMU contexts (e.g. s390), access_ok() is a no-op, and on those a range
with a wraparound can reach fault_in_multipages_...().

Since any wraparound means EFAULT there, the fix is trivial - turn
those

    while (uaddr <= end)
	    ...
into

    if (unlikely(uaddr > end))
	    return -EFAULT;
    do
	    ...
    while (uaddr <= end);
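
For illustration, a minimal user-space sketch of the failing case (the
pointer value and size below are made up, and uintptr_t stands in for
the __user pointers): once uaddr + size - 1 wraps, end compares below
uaddr, the old while loop never executes, and the helper falls through
and reports success instead of -EFAULT.

    #include <stdio.h>
    #include <stddef.h>
    #include <stdint.h>

    int main(void)
    {
            uintptr_t uaddr = UINTPTR_MAX - 100;   /* e.g. in the ERR_PTR() range near the top of memory */
            size_t size = 8192;
            uintptr_t end = uaddr + size - 1;      /* wraps around to a small value */

            if (uaddr <= end)
                    printf("old loop would probe the range\n");
            else
                    printf("old loop is skipped; caller sees success\n");
            return 0;
    }
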
Reported-by: Jan Stancek <jstancek@redhat.com>
Tested-by: Jan Stancek <jstancek@redhat.com>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 02d35700
@@ -571,56 +571,56 @@ static inline int fault_in_pages_readable(const char __user *uaddr, int size)
  */
 static inline int fault_in_multipages_writeable(char __user *uaddr, int size)
 {
-	int ret = 0;
 	char __user *end = uaddr + size - 1;
 
 	if (unlikely(size == 0))
-		return ret;
+		return 0;
 
+	if (unlikely(uaddr > end))
+		return -EFAULT;
 	/*
 	 * Writing zeroes into userspace here is OK, because we know that if
 	 * the zero gets there, we'll be overwriting it.
 	 */
-	while (uaddr <= end) {
-		ret = __put_user(0, uaddr);
-		if (ret != 0)
-			return ret;
+	do {
+		if (unlikely(__put_user(0, uaddr) != 0))
+			return -EFAULT;
 		uaddr += PAGE_SIZE;
-	}
+	} while (uaddr <= end);
 
 	/* Check whether the range spilled into the next page. */
 	if (((unsigned long)uaddr & PAGE_MASK) ==
 			((unsigned long)end & PAGE_MASK))
-		ret = __put_user(0, end);
+		return __put_user(0, end);
 
-	return ret;
+	return 0;
 }
 
 static inline int fault_in_multipages_readable(const char __user *uaddr,
 					       int size)
 {
 	volatile char c;
-	int ret = 0;
 	const char __user *end = uaddr + size - 1;
 
 	if (unlikely(size == 0))
-		return ret;
+		return 0;
 
-	while (uaddr <= end) {
-		ret = __get_user(c, uaddr);
-		if (ret != 0)
-			return ret;
+	if (unlikely(uaddr > end))
+		return -EFAULT;
+
+	do {
+		if (unlikely(__get_user(c, uaddr) != 0))
+			return -EFAULT;
 		uaddr += PAGE_SIZE;
-	}
+	} while (uaddr <= end);
 
 	/* Check whether the range spilled into the next page. */
 	if (((unsigned long)uaddr & PAGE_MASK) ==
 			((unsigned long)end & PAGE_MASK)) {
-		ret = __get_user(c, end);
-		(void)c;
+		return __get_user(c, end);
 	}
 
-	return ret;
+	return 0;
 }
 
 int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
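
As a rough sanity check of the new shape (again a plain user-space
model; MODEL_PAGE_SIZE, model_fault_in() and the probe counter are
invented for this sketch), the do-while still probes one byte per page
of a normal range, plus the trailing byte when the range spills into a
page the stride skipped, while a wrapped range is rejected up front:

    #include <stdio.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <errno.h>

    #define MODEL_PAGE_SIZE 4096UL
    #define MODEL_PAGE_MASK (~(uintptr_t)(MODEL_PAGE_SIZE - 1))

    /* Mirrors the control flow of the fixed fault_in_multipages_readable(). */
    static int model_fault_in(uintptr_t uaddr, size_t size, int *probes)
    {
            uintptr_t end = uaddr + size - 1;

            *probes = 0;
            if (size == 0)
                    return 0;
            if (uaddr > end)                 /* wrapped range: reject up front */
                    return -EFAULT;

            do {
                    (*probes)++;             /* stands in for __get_user(c, uaddr) */
                    uaddr += MODEL_PAGE_SIZE;
            } while (uaddr <= end);

            /* The page-sized stride may have skipped the page holding the last byte. */
            if ((uaddr & MODEL_PAGE_MASK) == (end & MODEL_PAGE_MASK))
                    (*probes)++;             /* stands in for __get_user(c, end) */
            return 0;
    }

    int main(void)
    {
            int probes;

            model_fault_in(0x1ff0, 0x20, &probes);            /* spills into a second page */
            printf("small spill: %d probes\n", probes);       /* 2 */
            model_fault_in(0x1000, 3 * MODEL_PAGE_SIZE, &probes);
            printf("three pages: %d probes\n", probes);       /* 3 */
            printf("wrapped:     %d\n",
                   model_fault_in(UINTPTR_MAX - 10, 64, &probes));  /* -EFAULT */
            return 0;
    }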