Commit b4bbdcef authored by David S. Miller, committed by Greg Kroah-Hartman

sparc64: Delete now unused user copy fixup functions.

[ Upstream commit 0fd0ff01 ]

Now that all of the user copy routines are converted to return
accurate residual lengths when an exception occurs, we no longer need
the broken fixup routines.
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent cb85910b
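
As background for the diff below: copy_from_user(), copy_to_user() and copy_in_user() return the number of bytes that could NOT be copied, with 0 meaning complete success. Once the assembly copy routines report that residual accurately themselves, callers need no separate fixup pass. A minimal sketch of a caller relying on this contract (the example_read() helper is hypothetical, not part of this patch):

#include <linux/errno.h>
#include <linux/uaccess.h>

/* Hypothetical helper: copy 'len' bytes in from userspace and fail
 * cleanly if any tail of the buffer could not be copied.
 */
static int example_read(void *dst, const void __user *src, unsigned long len)
{
	unsigned long left = copy_from_user(dst, src, len);

	if (left)	/* 'left' trailing bytes were not copied */
		return -EFAULT;
	return 0;
}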
arch/sparc/include/asm/uaccess_64.h
@@ -204,58 +204,30 @@ int __get_user_bad(void);
 unsigned long __must_check ___copy_from_user(void *to,
					      const void __user *from,
					      unsigned long size);
-unsigned long copy_from_user_fixup(void *to, const void __user *from,
-				   unsigned long size);
 static inline unsigned long __must_check
 copy_from_user(void *to, const void __user *from, unsigned long size)
 {
-	unsigned long ret = ___copy_from_user(to, from, size);
-
-	if (unlikely(ret)) {
-		if ((long)ret < 0)
-			ret = copy_from_user_fixup(to, from, size);
-		return ret;
-	}
-	return ret;
+	return ___copy_from_user(to, from, size);
 }
 #define __copy_from_user copy_from_user
 
 unsigned long __must_check ___copy_to_user(void __user *to,
					    const void *from,
					    unsigned long size);
-unsigned long copy_to_user_fixup(void __user *to, const void *from,
-				 unsigned long size);
 static inline unsigned long __must_check
 copy_to_user(void __user *to, const void *from, unsigned long size)
 {
-	unsigned long ret = ___copy_to_user(to, from, size);
-
-	if (unlikely(ret)) {
-		if ((long)ret < 0)
-			ret = copy_to_user_fixup(to, from, size);
-		return ret;
-	}
-	return ret;
+	return ___copy_to_user(to, from, size);
 }
 #define __copy_to_user copy_to_user
 
 unsigned long __must_check ___copy_in_user(void __user *to,
					    const void __user *from,
					    unsigned long size);
-unsigned long copy_in_user_fixup(void __user *to, void __user *from,
-				 unsigned long size);
 static inline unsigned long __must_check
 copy_in_user(void __user *to, void __user *from, unsigned long size)
 {
-	unsigned long ret = ___copy_in_user(to, from, size);
-
-	if (unlikely(ret)) {
-		if ((long)ret < 0)
-			ret = copy_in_user_fixup(to, from, size);
-		return ret;
-	}
-	return ret;
+	return ___copy_in_user(to, from, size);
 }
 #define __copy_in_user copy_in_user
arch/sparc/lib/Makefile
@@ -38,7 +38,7 @@ lib-$(CONFIG_SPARC64) += NG4patch.o NG4copy_page.o NG4clear_page.o NG4memset.o
 lib-$(CONFIG_SPARC64) += GENmemcpy.o GENcopy_from_user.o GENcopy_to_user.o
 lib-$(CONFIG_SPARC64) += GENpatch.o GENpage.o GENbzero.o
-lib-$(CONFIG_SPARC64) += copy_in_user.o user_fixup.o memmove.o
+lib-$(CONFIG_SPARC64) += copy_in_user.o memmove.o
 lib-$(CONFIG_SPARC64) += mcount.o ipcsum.o xor.o hweight.o ffs.o
 obj-$(CONFIG_SPARC64) += iomap.o
arch/sparc/lib/user_fixup.c (deleted)
-/* user_fixup.c: Fix up user copy faults.
- *
- * Copyright (C) 2004 David S. Miller <davem@redhat.com>
- */
-
-#include <linux/compiler.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/module.h>
-
-#include <asm/uaccess.h>
-
-/* Calculating the exact fault address when using
- * block loads and stores can be very complicated.
- *
- * Instead of trying to be clever and handling all
- * of the cases, just fix things up simply here.
- */
-
-static unsigned long compute_size(unsigned long start, unsigned long size, unsigned long *offset)
-{
-	unsigned long fault_addr = current_thread_info()->fault_address;
-	unsigned long end = start + size;
-
-	if (fault_addr < start || fault_addr >= end) {
-		*offset = 0;
-	} else {
-		*offset = fault_addr - start;
-		size = end - fault_addr;
-	}
-	return size;
-}
-
-unsigned long copy_from_user_fixup(void *to, const void __user *from, unsigned long size)
-{
-	unsigned long offset;
-
-	size = compute_size((unsigned long) from, size, &offset);
-	if (likely(size))
-		memset(to + offset, 0, size);
-
-	return size;
-}
-EXPORT_SYMBOL(copy_from_user_fixup);
-
-unsigned long copy_to_user_fixup(void __user *to, const void *from, unsigned long size)
-{
-	unsigned long offset;
-
-	return compute_size((unsigned long) to, size, &offset);
-}
-EXPORT_SYMBOL(copy_to_user_fixup);
-
-unsigned long copy_in_user_fixup(void __user *to, void __user *from, unsigned long size)
-{
-	unsigned long fault_addr = current_thread_info()->fault_address;
-	unsigned long start = (unsigned long) to;
-	unsigned long end = start + size;
-
-	if (fault_addr >= start && fault_addr < end)
-		return end - fault_addr;
-
-	start = (unsigned long) from;
-	end = start + size;
-	if (fault_addr >= start && fault_addr < end)
-		return end - fault_addr;
-
-	return size;
-}
-EXPORT_SYMBOL(copy_in_user_fixup);
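
For reference, the fixup math in the deleted file is simple interval arithmetic: for a copy covering [start, start + size), a fault at fault_addr inside that window leaves end - fault_addr bytes uncopied, with fault_addr - start bytes already done. A standalone sketch of that calculation (plain C outside the kernel; the addresses and sizes are made up for illustration):

#include <stdio.h>

/* Mirrors the deleted compute_size(): return the uncopied tail and,
 * via *offset, how many bytes preceded the fault.
 */
static unsigned long compute_size(unsigned long start, unsigned long size,
				  unsigned long fault_addr, unsigned long *offset)
{
	unsigned long end = start + size;

	if (fault_addr < start || fault_addr >= end) {
		*offset = 0;	/* fault outside the window: treat as nothing copied */
	} else {
		*offset = fault_addr - start;
		size = end - fault_addr;
	}
	return size;
}

int main(void)
{
	unsigned long offset;
	/* Made-up example: a 256-byte copy starting at 0x1000 faults at 0x10a0. */
	unsigned long left = compute_size(0x1000, 256, 0x10a0, &offset);

	printf("copied %lu, left %lu\n", offset, left);	/* copied 160, left 96 */
	return 0;
}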