Commit d76c1ae4 authored by Ingo Molnar

x86: clean up csum-wrappers_64.c some more

no code changed:

arch/x86/lib/csum-wrappers_64.o:
   text    data     bss     dec     hex filename
    839       0       0     839     347 csum-wrappers_64.o.before
    839       0       0     839     347 csum-wrappers_64.o.after
md5:
b31994226c33e0b52bef5a0e110b84b0  csum-wrappers_64.o.before.asm
b31994226c33e0b52bef5a0e110b84b0  csum-wrappers_64.o.after.asm
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent 0df025b7
arch/x86/lib/csum-wrappers_64.c

-/* Copyright 2002,2003 Andi Kleen, SuSE Labs.
+/*
+ * Copyright 2002, 2003 Andi Kleen, SuSE Labs.
  * Subject to the GNU Public License v.2
  *
  * Wrappers of assembly checksum functions for x86-64.
  */
 #include <asm/checksum.h>
 #include <linux/module.h>
@@ -24,37 +24,47 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
 {
 	might_sleep();
 	*errp = 0;
-	if (likely(access_ok(VERIFY_READ, src, len))) {
-		/* Why 6, not 7? To handle odd addresses aligned we
-		   would need to do considerable complications to fix the
-		   checksum which is defined as an 16bit accumulator. The
-		   fix alignment code is primarily for performance
-		   compatibility with 32bit and that will handle odd
-		   addresses slowly too. */
-		if (unlikely((unsigned long)src & 6)) {
-			while (((unsigned long)src & 6) && len >= 2) {
-				__u16 val16;
-				*errp = __get_user(val16, (const __u16 __user *)src);
-				if (*errp)
-					return isum;
-				*(__u16 *)dst = val16;
-				isum = (__force __wsum)add32_with_carry(
-						(__force unsigned)isum, val16);
-				src += 2;
-				dst += 2;
-				len -= 2;
-			}
-		}
-		isum = csum_partial_copy_generic((__force const void *)src,
-					dst, len, isum, errp, NULL);
-		if (likely(*errp == 0))
-			return isum;
+
+	if (!likely(access_ok(VERIFY_READ, src, len)))
+		goto out_err;
+
+	/*
+	 * Why 6, not 7? To handle odd addresses aligned we
+	 * would need to do considerable complications to fix the
+	 * checksum which is defined as an 16bit accumulator. The
+	 * fix alignment code is primarily for performance
+	 * compatibility with 32bit and that will handle odd
+	 * addresses slowly too.
+	 */
+	if (unlikely((unsigned long)src & 6)) {
+		while (((unsigned long)src & 6) && len >= 2) {
+			__u16 val16;
+
+			*errp = __get_user(val16, (const __u16 __user *)src);
+			if (*errp)
+				return isum;
+
+			*(__u16 *)dst = val16;
+			isum = (__force __wsum)add32_with_carry(
+					(__force unsigned)isum, val16);
+			src += 2;
+			dst += 2;
+			len -= 2;
+		}
 	}
+	isum = csum_partial_copy_generic((__force const void *)src,
+				dst, len, isum, errp, NULL);
+	if (unlikely(*errp))
+		goto out_err;
+
+	return isum;
+
+out_err:
 	*errp = -EFAULT;
 	memset(dst, 0, len);
 	return isum;
 }
 EXPORT_SYMBOL(csum_partial_copy_from_user);
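The fixup loop folds each leading 16-bit word into the running sum with add32_with_carry(), which is defined in arch/x86/include/asm/checksum_64.h rather than in this file. A minimal plain-C model of that end-around-carry add (an illustration only; the kernel helper uses addl/adcl):

#include <linux/types.h>

/*
 * Illustrative model only: add two 32-bit values and fold the carry
 * out of bit 31 back into bit 0. This preserves the sum modulo
 * 2^32 - 1, and since 2^16 - 1 divides 2^32 - 1, it also preserves
 * the 16-bit ones'-complement accumulator the comment refers to.
 */
static inline unsigned int add32_with_carry_model(unsigned int a,
						  unsigned int b)
{
	__u64 sum = (__u64)a + b;

	return (unsigned int)(sum + (sum >> 32));
}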
@@ -73,6 +83,7 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
 			    int len, __wsum isum, int *errp)
 {
 	might_sleep();
+
 	if (unlikely(!access_ok(VERIFY_WRITE, dst, len))) {
 		*errp = -EFAULT;
 		return 0;
@@ -81,6 +92,7 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
 	if (unlikely((unsigned long)dst & 6)) {
 		while (((unsigned long)dst & 6) && len >= 2) {
 			__u16 val16 = *(__u16 *)src;
+
 			isum = (__force __wsum)add32_with_carry(
 					(__force unsigned)isum, val16);
 			*errp = __put_user(val16, (__u16 __user *)dst);
@@ -93,9 +105,9 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
 	}
 	*errp = 0;
-	return csum_partial_copy_generic(src, (void __force *)dst, len, isum, NULL, errp);
+	return csum_partial_copy_generic(src, (void __force *)dst,
+					 len, isum, NULL, errp);
 }
 EXPORT_SYMBOL(csum_partial_copy_to_user);
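Note the asymmetry in the two wrappers' tail calls: csum_partial_copy_generic() takes separate source and destination error pointers, so the _from_user variant passes (errp, NULL) while the _to_user variant passes (NULL, errp), matching which side of the copy can fault. A hypothetical caller of the _to_user path (a sketch only; the function and buffer names here are invented):

#include <linux/errno.h>
#include <asm/checksum.h>

/*
 * Hypothetical example, not part of this patch: checksum `len` bytes
 * of a kernel buffer while copying them out to user space, returning
 * the folded 16-bit checksum via `folded`.
 */
static int copy_out_checksummed(void __user *udst, const void *kbuf,
				int len, __sum16 *folded)
{
	int err = 0;
	__wsum sum = csum_partial_copy_to_user(kbuf, udst, len, 0, &err);

	if (err)
		return -EFAULT;

	*folded = csum_fold(sum);
	return 0;
}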
@@ -122,14 +134,17 @@ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
 	rest = (__force __u64)htonl(len) + (__force __u64)htons(proto) +
 		(__force __u64)sum;
+
 	asm("  addq (%[saddr]),%[sum]\n"
 	    "  adcq 8(%[saddr]),%[sum]\n"
 	    "  adcq (%[daddr]),%[sum]\n"
 	    "  adcq 8(%[daddr]),%[sum]\n"
 	    "  adcq $0,%[sum]\n"
 	    : [sum] "=r" (sum64)
 	    : "[sum]" (rest), [saddr] "r" (saddr), [daddr] "r" (daddr));
-	return csum_fold((__force __wsum)add32_with_carry(sum64 & 0xffffffff, sum64>>32));
+
+	return csum_fold(
+	       (__force __wsum)add32_with_carry(sum64 & 0xffffffff, sum64>>32));
 }
 EXPORT_SYMBOL(csum_ipv6_magic);
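For reference, the adcq chain adds the two 64-bit halves of each IPv6 address into the pseudo-header sum with end-around carry; the 64-bit result is then folded to 32 bits by add32_with_carry() and to 16 bits by csum_fold(). A plain-C sketch of the same arithmetic (illustration only; folding the carry after every add is equivalent, modulo 2^64 - 1, to the single trailing adcq $0):

#include <linux/types.h>
#include <linux/in6.h>

/* Fold the carry-out straight back in after each 64-bit add. */
static inline __u64 add64_with_carry_model(__u64 a, __u64 b)
{
	__u64 r = a + b;

	return r + (r < a);	/* r < a iff the addition wrapped */
}

/* Illustrative C equivalent of the addq/adcq sequence above. */
static __u64 ipv6_csum_sum64_model(const struct in6_addr *saddr,
				   const struct in6_addr *daddr,
				   __u64 rest)
{
	const __u64 *s = (const __u64 *)saddr;
	const __u64 *d = (const __u64 *)daddr;
	__u64 sum = rest;

	sum = add64_with_carry_model(sum, s[0]);
	sum = add64_with_carry_model(sum, s[1]);
	sum = add64_with_carry_model(sum, d[0]);
	sum = add64_with_carry_model(sum, d[1]);

	return sum;	/* caller folds 64 -> 32 -> 16 bits as above */
}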