Commit c512c691 authored by Linus Torvalds

uaccess: implement a proper unsafe_copy_to_user() and switch filldir over to it

In commit 9f79b78e ("Convert filldir[64]() from __put_user() to
unsafe_put_user()") I made filldir() use unsafe_put_user(), which
improves code generation on x86 enormously.

But because we didn't have an "unsafe_copy_to_user()", the dirent name
copy was also done by hand with unsafe_put_user() in a loop, and it
turns out that a lot of other architectures didn't like that, because
unlike x86, they have various alignment issues.

Most non-x86 architectures trap unaligned accesses and fix them up
(slowly), and some (like xtensa) will simply fail unaligned put_user()
accesses unconditionally, which makes the "copy using put_user() in a
loop" approach not work for them at all.

I could make that code do explicit alignment handling, but the
architectures that dislike unaligned accesses also don't really use the
fancy "user_access_begin/end()" model, so they might as well just use
the regular old __copy_to_user() interface.

So this commit takes that looping implementation, turns it into the x86
version of "unsafe_copy_to_user()", and makes other architectures
implement the unsafe copy version as __copy_to_user() (the same way they
do for the other unsafe_xyz() accessor functions).
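
Roughly, the resulting call-site pattern looks like this (a simplified
sketch of the filldir() usage; fill_one_dirent() is an illustrative
name, not a real function, and most dirent fields are omitted):

    /* Sketch: everything between user_access_begin() and
     * user_access_end() may use the unsafe_*() accessors, and all
     * faults branch to the efault_end label.
     */
    static int fill_one_dirent(struct linux_dirent64 __user *dirent,
                               const char *name, int namlen, int reclen)
    {
            if (!user_access_begin(dirent, reclen))
                    return -EFAULT;
            unsafe_put_user(reclen, &dirent->d_reclen, efault_end);
            /* the name copy (plus NUL) now uses unsafe_copy_to_user() */
            unsafe_copy_dirent_name(dirent->d_name, name, namlen, efault_end);
            user_access_end();
            return 0;

    efault_end:
            user_access_end();
            return -EFAULT;
    }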

Note that it only does this for copying _to_ user space, and we still
don't have an unsafe version of copy_from_user().

That's partly because we have no current users of it, but also partly
because the copy_from_user() case is slightly different and cannot
efficiently be implemented in terms of an unsafe_get_user() loop (because
gcc can't do asm goto with outputs).
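
The asymmetry is in the asm: a user store can be a single instruction
whose fault case just branches to a label, so the asm needs no output
operands. A load has to produce a value, and gcc's "asm goto" could
not have outputs. A simplified sketch of the store side (assumption:
modeled on the x86 __put_user_goto() machinery, with the error-path
details elided):

    /* One-byte store to user space: inputs and a branch target, but
     * no outputs -- the only shape "asm goto" could express.
     * _ASM_EXTABLE() records "if the mov at 1: faults, jump to the
     * label" in the exception table.
     */
    #define unsafe_put_user_u8(x, ptr, err_label)			\
            asm goto("1:	movb %b0, %1\n"				\
                     _ASM_EXTABLE(1b, %l2)				\
                     : /* no outputs */				\
                     : "iq" (x), "m" (*(unsigned char __user *)(ptr))	\
                     : : err_label)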

It would be trivial to do this using "rep movsb", which would work
really nicely on newer x86 cores, but really badly on some older ones.
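
The instruction-level idea is tiny; the hard part is the fault
handling, which this sketch (my illustration, not this commit's code)
omits entirely:

    /* Copy n bytes with one string instruction.  Fast on CPUs with
     * ERMS ("enhanced rep movsb"), slow on many older cores -- exactly
     * the trade-off described above.
     */
    static inline void rep_movsb(void *to, const void *from, unsigned long n)
    {
            asm volatile("rep movsb"
                         : "+D" (to), "+S" (from), "+c" (n)
                         : : "memory");
    }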

Al Viro is looking at cleaning up all our user copy routines to make
this all a non-issue, but for now we have this simple-but-stupid version
for x86 that works fine for the dirent name copy case because those
names are short strings and we simply don't need anything fancier.

Fixes: 9f79b78e ("Convert filldir[64]() from __put_user() to unsafe_put_user()")
Reported-by: Guenter Roeck <linux@roeck-us.net>
Reported-and-tested-by: Tony Luck <tony.luck@intel.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Max Filippov <jcmvbkbc@gmail.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent da0c9ea1
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -734,5 +734,28 @@ do { \
 	if (unlikely(__gu_err)) goto err_label;				\
 } while (0)
 
+/*
+ * We want the unsafe accessors to always be inlined and use
+ * the error labels - thus the macro games.
+ */
+#define unsafe_copy_loop(dst, src, len, type, label)			\
+	while (len >= sizeof(type)) {					\
+		unsafe_put_user(*(type *)src,(type __user *)dst,label);	\
+		dst += sizeof(type);					\
+		src += sizeof(type);					\
+		len -= sizeof(type);					\
+	}
+
+#define unsafe_copy_to_user(_dst,_src,_len,label)			\
+do {									\
+	char __user *__ucu_dst = (_dst);				\
+	const char *__ucu_src = (_src);					\
+	size_t __ucu_len = (_len);					\
+	unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u64, label);	\
+	unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u32, label);	\
+	unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u16, label);	\
+	unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u8, label);	\
+} while (0)
+
 #endif /* _ASM_X86_UACCESS_H */
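
As a sanity check of the descending-size loops above, here is a
stand-alone userspace rendition (my sketch; memcpy() stands in for
unsafe_put_user(), since only the 8/4/2/1 decomposition matters here).
A 7-byte name comes out as one 4-byte, one 2-byte and one 1-byte store:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define copy_loop(dst, src, len, type)			\
            while (len >= sizeof(type)) {			\
                    memcpy(dst, src, sizeof(type));		\
                    printf("%zu-byte chunk\n", sizeof(type));	\
                    dst += sizeof(type);			\
                    src += sizeof(type);			\
                    len -= sizeof(type);			\
            }

    int main(void)
    {
            char buf[8];
            char *dst = buf;
            const char *src = "example";	/* a 7-byte name */
            size_t len = 7;

            copy_loop(dst, src, len, uint64_t);	/* skipped: len < 8 */
            copy_loop(dst, src, len, uint32_t);	/* 4 bytes */
            copy_loop(dst, src, len, uint16_t);	/* 2 bytes */
            copy_loop(dst, src, len, uint8_t);	/* 1 byte */

            buf[7] = '\0';
            puts(buf);	/* prints "example" */
            return 0;
    }
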
--- a/fs/readdir.c
+++ b/fs/readdir.c
@@ -27,53 +27,13 @@
 /*
  * Note the "unsafe_put_user() semantics: we goto a
  * label for errors.
- *
- * Also note how we use a "while()" loop here, even though
- * only the biggest size needs to loop. The compiler (well,
- * at least gcc) is smart enough to turn the smaller sizes
- * into just if-statements, and this way we don't need to
- * care whether 'u64' or 'u32' is the biggest size.
  */
-#define unsafe_copy_loop(dst, src, len, type, label)			\
-	while (len >= sizeof(type)) {					\
-		unsafe_put_user(get_unaligned((type *)src),		\
-			(type __user *)dst, label);			\
-		dst += sizeof(type);					\
-		src += sizeof(type);					\
-		len -= sizeof(type);					\
-	}
-
-/*
- * We avoid doing 64-bit copies on 32-bit architectures. They
- * might be better, but the component names are mostly small,
- * and the 64-bit cases can end up being much more complex and
- * put much more register pressure on the code, so it's likely
- * not worth the pain of unaligned accesses etc.
- *
- * So limit the copies to "unsigned long" size. I did verify
- * that at least the x86-32 case is ok without this limiting,
- * but I worry about random other legacy 32-bit cases that
- * might not do as well.
- */
-#define unsafe_copy_type(dst, src, len, type, label) do {		\
-	if (sizeof(type) <= sizeof(unsigned long))			\
-		unsafe_copy_loop(dst, src, len, type, label);		\
-} while (0)
-
 /*
  * Copy the dirent name to user space, and NUL-terminate
  * it. This should not be a function call, since we're doing
  * the copy inside a "user_access_begin/end()" section.
  */
 #define unsafe_copy_dirent_name(_dst, _src, _len, label) do {	\
 	char __user *dst = (_dst);				\
 	const char *src = (_src);				\
 	size_t len = (_len);					\
-	unsafe_copy_type(dst, src, len, u64, label);		\
-	unsafe_copy_type(dst, src, len, u32, label);		\
-	unsafe_copy_type(dst, src, len, u16, label);		\
-	unsafe_copy_type(dst, src, len, u8, label);		\
-	unsafe_put_user(0, dst, label);				\
+	unsafe_put_user(0, dst+len, label);			\
+	unsafe_copy_to_user(dst, src, len, label);		\
 } while (0)
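
One subtlety in the replacement above: the old code stored the NUL at
"dst" only after the copy loops had advanced dst past the name, while
the new code stores it up front at "dst+len", because
unsafe_copy_to_user() advances only its private __ucu_* copies of the
pointers and leaves dst untouched.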
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -355,8 +355,10 @@ extern long strnlen_unsafe_user(const void __user *unsafe_addr, long count);
 #ifndef user_access_begin
 #define user_access_begin(ptr,len) access_ok(ptr, len)
 #define user_access_end() do { } while (0)
-#define unsafe_get_user(x, ptr, err) do { if (unlikely(__get_user(x, ptr))) goto err; } while (0)
-#define unsafe_put_user(x, ptr, err) do { if (unlikely(__put_user(x, ptr))) goto err; } while (0)
+#define unsafe_op_wrap(op, err) do { if (unlikely(op)) goto err; } while (0)
+#define unsafe_get_user(x,p,e) unsafe_op_wrap(__get_user(x,p),e)
+#define unsafe_put_user(x,p,e) unsafe_op_wrap(__put_user(x,p),e)
+#define unsafe_copy_to_user(d,s,l,e) unsafe_op_wrap(__copy_to_user(d,s,l),e)
 static inline unsigned long user_access_save(void) { return 0UL; }
 static inline void user_access_restore(unsigned long flags) { }
 #endif
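
So on any architecture that does not define user_access_begin(), the
new accessor simply expands to the plain, alignment-safe copy
(expansion sketch):

    unsafe_copy_to_user(d, s, l, e);
    /* ...expands to... */
    do { if (unlikely(__copy_to_user(d, s, l))) goto e; } while (0);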