Commit 9a677341 authored by Al Viro's avatar Al Viro

m32r: switch to RAW_COPY_USER

Signed-off-by: default avatarAl Viro <viro@zeniv.linux.org.uk>
parent 8cd920f2
...@@ -19,6 +19,7 @@ config M32R ...@@ -19,6 +19,7 @@ config M32R
select HAVE_DEBUG_STACKOVERFLOW select HAVE_DEBUG_STACKOVERFLOW
select CPU_NO_EFFICIENT_FFS select CPU_NO_EFFICIENT_FFS
select DMA_NOOP_OPS select DMA_NOOP_OPS
select ARCH_HAS_RAW_COPY_USER
config SBUS config SBUS
bool bool
......
...@@ -13,6 +13,7 @@ ...@@ -13,6 +13,7 @@
*/ */
#include <asm/page.h> #include <asm/page.h>
#include <asm/setup.h> #include <asm/setup.h>
#include <linux/prefetch.h>
/* /*
* The fs value determines whether argument validity checking should be * The fs value determines whether argument validity checking should be
...@@ -463,107 +464,22 @@ do { \ ...@@ -463,107 +464,22 @@ do { \
/* We let the __ versions of copy_from/to_user inline, because they're often /* We let the __ versions of copy_from/to_user inline, because they're often
* used in fast paths and have only a small space overhead. * used in fast paths and have only a small space overhead.
*/ */
static inline unsigned long __generic_copy_from_user_nocheck(void *to, static inline unsigned long
const void __user *from, unsigned long n) raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{ {
prefetchw(to);
__copy_user(to, from, n); __copy_user(to, from, n);
return n; return n;
} }
static inline unsigned long __generic_copy_to_user_nocheck(void __user *to, static inline unsigned long
const void *from, unsigned long n) raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{ {
prefetch(from);
__copy_user(to, from, n); __copy_user(to, from, n);
return n; return n;
} }
unsigned long __generic_copy_to_user(void __user *, const void *, unsigned long);
unsigned long __generic_copy_from_user(void *, const void __user *, unsigned long);
/**
* __copy_to_user: - Copy a block of data into user space, with less checking.
* @to: Destination address, in user space.
* @from: Source address, in kernel space.
* @n: Number of bytes to copy.
*
* Context: User context only. This function may sleep if pagefaults are
* enabled.
*
* Copy data from kernel space to user space. Caller must check
* the specified block with access_ok() before calling this function.
*
* Returns number of bytes that could not be copied.
* On success, this will be zero.
*/
#define __copy_to_user(to, from, n) \
__generic_copy_to_user_nocheck((to), (from), (n))
#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user
/**
* copy_to_user: - Copy a block of data into user space.
* @to: Destination address, in user space.
* @from: Source address, in kernel space.
* @n: Number of bytes to copy.
*
* Context: User context only. This function may sleep if pagefaults are
* enabled.
*
* Copy data from kernel space to user space.
*
* Returns number of bytes that could not be copied.
* On success, this will be zero.
*/
#define copy_to_user(to, from, n) \
({ \
might_fault(); \
__generic_copy_to_user((to), (from), (n)); \
})
/**
* __copy_from_user: - Copy a block of data from user space, with less checking. * @to: Destination address, in kernel space.
* @from: Source address, in user space.
* @n: Number of bytes to copy.
*
* Context: User context only. This function may sleep if pagefaults are
* enabled.
*
* Copy data from user space to kernel space. Caller must check
* the specified block with access_ok() before calling this function.
*
* Returns number of bytes that could not be copied.
* On success, this will be zero.
*
* If some data could not be copied, this function will pad the copied
* data to the requested size using zero bytes.
*/
#define __copy_from_user(to, from, n) \
__generic_copy_from_user_nocheck((to), (from), (n))
/**
* copy_from_user: - Copy a block of data from user space.
* @to: Destination address, in kernel space.
* @from: Source address, in user space.
* @n: Number of bytes to copy.
*
* Context: User context only. This function may sleep if pagefaults are
* enabled.
*
* Copy data from user space to kernel space.
*
* Returns number of bytes that could not be copied.
* On success, this will be zero.
*
* If some data could not be copied, this function will pad the copied
* data to the requested size using zero bytes.
*/
#define copy_from_user(to, from, n) \
({ \
might_fault(); \
__generic_copy_from_user((to), (from), (n)); \
})
long __must_check strncpy_from_user(char *dst, const char __user *src, long __must_check strncpy_from_user(char *dst, const char __user *src,
long count); long count);
long __must_check __strncpy_from_user(char *dst, long __must_check __strncpy_from_user(char *dst,
......
...@@ -26,8 +26,6 @@ EXPORT_SYMBOL(strncpy_from_user); ...@@ -26,8 +26,6 @@ EXPORT_SYMBOL(strncpy_from_user);
EXPORT_SYMBOL(__strncpy_from_user); EXPORT_SYMBOL(__strncpy_from_user);
EXPORT_SYMBOL(clear_user); EXPORT_SYMBOL(clear_user);
EXPORT_SYMBOL(__clear_user); EXPORT_SYMBOL(__clear_user);
EXPORT_SYMBOL(__generic_copy_from_user);
EXPORT_SYMBOL(__generic_copy_to_user);
EXPORT_SYMBOL(strnlen_user); EXPORT_SYMBOL(strnlen_user);
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
......
...@@ -11,28 +11,6 @@ ...@@ -11,28 +11,6 @@
#include <linux/thread_info.h> #include <linux/thread_info.h>
#include <linux/uaccess.h> #include <linux/uaccess.h>
/*
 * Copy a block of data from kernel space into user space, validating the
 * destination range first.
 *
 * @to:   destination address in user space
 * @from: source address in kernel space
 * @n:    number of bytes to copy
 *
 * Returns the number of bytes that could NOT be copied (0 on full success).
 * NOTE(review): this relies on __copy_user() updating n in place to the
 * residual count — confirm against the m32r __copy_user macro definition.
 * If access_ok() rejects the range, nothing is copied and the full n is
 * returned.
 */
unsigned long
__generic_copy_to_user(void __user *to, const void *from, unsigned long n)
{
/* Hint the CPU to pull the kernel-side source into cache before copying. */
prefetch(from);
/* Only copy if the whole user-space destination range is writable. */
if (access_ok(VERIFY_WRITE, to, n))
__copy_user(to,from,n);
return n;
}
/*
 * Copy a block of data from user space into kernel space, validating the
 * source range first and zero-filling any bytes that could not be copied.
 *
 * @to:   destination address in kernel space
 * @from: source address in user space
 * @n:    number of bytes to copy
 *
 * Returns the number of bytes that could NOT be copied (0 on full success).
 * If the copy is short (fault mid-copy) or access_ok() rejects the range
 * entirely, the uncopied tail of the destination buffer is cleared so the
 * caller never sees stale kernel memory.
 */
unsigned long
__generic_copy_from_user(void *to, const void __user *from, unsigned long n)
{
/* Default: nothing copied — covers the access_ok() failure path. */
unsigned long ret = n;
/* Hint the CPU we are about to write the kernel-side destination. */
prefetchw(to);
if (access_ok(VERIFY_READ, from, n))
ret = __copy_user(to,from,n);
/* Zero-pad the uncopied remainder (last `ret` bytes of the buffer). */
if (unlikely(ret))
memset(to + n - ret, 0, ret);
return ret;
}
/* /*
* Copy a null terminated string from userspace. * Copy a null terminated string from userspace.
*/ */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment