Commit 0df025b7 authored by Paolo Ciarrocchi, committed by Ingo Molnar

x86: coding style fixes in arch/x86/lib/csum-wrappers_64.c

no code changed:

arch/x86/lib/csum-wrappers_64.o:
   text    data     bss     dec     hex filename
    839       0       0     839     347 csum-wrappers_64.o.before
    839       0       0     839     347 csum-wrappers_64.o.after
md5:
b31994226c33e0b52bef5a0e110b84b0  csum-wrappers_64.o.before.asm
b31994226c33e0b52bef5a0e110b84b0  csum-wrappers_64.o.after.asm
Signed-off-by: Paolo Ciarrocchi <paolo.ciarrocchi@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent 4b44f810
/* Copyright 2002,2003 Andi Kleen, SuSE Labs. /* Copyright 2002,2003 Andi Kleen, SuSE Labs.
* Subject to the GNU Public License v.2 * Subject to the GNU Public License v.2
* *
* Wrappers of assembly checksum functions for x86-64. * Wrappers of assembly checksum functions for x86-64.
*/ */
#include <asm/checksum.h> #include <asm/checksum.h>
#include <linux/module.h> #include <linux/module.h>
/** /**
* csum_partial_copy_from_user - Copy and checksum from user space. * csum_partial_copy_from_user - Copy and checksum from user space.
* @src: source address (user space) * @src: source address (user space)
* @dst: destination address * @dst: destination address
* @len: number of bytes to be copied. * @len: number of bytes to be copied.
* @isum: initial sum that is added into the result (32bit unfolded) * @isum: initial sum that is added into the result (32bit unfolded)
* @errp: set to -EFAULT for an bad source address. * @errp: set to -EFAULT for an bad source address.
* *
* Returns an 32bit unfolded checksum of the buffer. * Returns an 32bit unfolded checksum of the buffer.
* src and dst are best aligned to 64bits. * src and dst are best aligned to 64bits.
*/ */
__wsum __wsum
csum_partial_copy_from_user(const void __user *src, void *dst, csum_partial_copy_from_user(const void __user *src, void *dst,
int len, __wsum isum, int *errp) int len, __wsum isum, int *errp)
{ {
might_sleep(); might_sleep();
*errp = 0; *errp = 0;
if (likely(access_ok(VERIFY_READ,src, len))) { if (likely(access_ok(VERIFY_READ, src, len))) {
/* Why 6, not 7? To handle odd addresses aligned we /* Why 6, not 7? To handle odd addresses aligned we
would need to do considerable complications to fix the would need to do considerable complications to fix the
checksum which is defined as an 16bit accumulator. The checksum which is defined as an 16bit accumulator. The
fix alignment code is primarily for performance fix alignment code is primarily for performance
compatibility with 32bit and that will handle odd compatibility with 32bit and that will handle odd
addresses slowly too. */ addresses slowly too. */
if (unlikely((unsigned long)src & 6)) { if (unlikely((unsigned long)src & 6)) {
while (((unsigned long)src & 6) && len >= 2) { while (((unsigned long)src & 6) && len >= 2) {
__u16 val16; __u16 val16;
*errp = __get_user(val16, (const __u16 __user *)src); *errp = __get_user(val16, (const __u16 __user *)src);
if (*errp) if (*errp)
return isum; return isum;
*(__u16 *)dst = val16; *(__u16 *)dst = val16;
isum = (__force __wsum)add32_with_carry( isum = (__force __wsum)add32_with_carry(
(__force unsigned)isum, val16); (__force unsigned)isum, val16);
src += 2; src += 2;
dst += 2; dst += 2;
len -= 2; len -= 2;
} }
} }
isum = csum_partial_copy_generic((__force const void *)src, isum = csum_partial_copy_generic((__force const void *)src,
dst, len, isum, errp, NULL); dst, len, isum, errp, NULL);
if (likely(*errp == 0)) if (likely(*errp == 0))
return isum; return isum;
} }
*errp = -EFAULT; *errp = -EFAULT;
memset(dst,0,len); memset(dst, 0, len);
return isum; return isum;
} }
EXPORT_SYMBOL(csum_partial_copy_from_user); EXPORT_SYMBOL(csum_partial_copy_from_user);
/** /**
* csum_partial_copy_to_user - Copy and checksum to user space. * csum_partial_copy_to_user - Copy and checksum to user space.
* @src: source address * @src: source address
* @dst: destination address (user space) * @dst: destination address (user space)
* @len: number of bytes to be copied. * @len: number of bytes to be copied.
* @isum: initial sum that is added into the result (32bit unfolded) * @isum: initial sum that is added into the result (32bit unfolded)
* @errp: set to -EFAULT for an bad destination address. * @errp: set to -EFAULT for an bad destination address.
* *
* Returns an 32bit unfolded checksum of the buffer. * Returns an 32bit unfolded checksum of the buffer.
* src and dst are best aligned to 64bits. * src and dst are best aligned to 64bits.
*/ */
__wsum __wsum
csum_partial_copy_to_user(const void *src, void __user *dst, csum_partial_copy_to_user(const void *src, void __user *dst,
int len, __wsum isum, int *errp) int len, __wsum isum, int *errp)
{ {
might_sleep(); might_sleep();
if (unlikely(!access_ok(VERIFY_WRITE, dst, len))) { if (unlikely(!access_ok(VERIFY_WRITE, dst, len))) {
*errp = -EFAULT; *errp = -EFAULT;
return 0; return 0;
} }
if (unlikely((unsigned long)dst & 6)) { if (unlikely((unsigned long)dst & 6)) {
while (((unsigned long)dst & 6) && len >= 2) { while (((unsigned long)dst & 6) && len >= 2) {
__u16 val16 = *(__u16 *)src; __u16 val16 = *(__u16 *)src;
isum = (__force __wsum)add32_with_carry( isum = (__force __wsum)add32_with_carry(
(__force unsigned)isum, val16); (__force unsigned)isum, val16);
*errp = __put_user(val16, (__u16 __user *)dst); *errp = __put_user(val16, (__u16 __user *)dst);
if (*errp) if (*errp)
return isum; return isum;
src += 2; src += 2;
dst += 2; dst += 2;
len -= 2; len -= 2;
} }
} }
*errp = 0; *errp = 0;
return csum_partial_copy_generic(src, (void __force *)dst,len,isum,NULL,errp); return csum_partial_copy_generic(src, (void __force *)dst, len, isum, NULL, errp);
} }
EXPORT_SYMBOL(csum_partial_copy_to_user); EXPORT_SYMBOL(csum_partial_copy_to_user);
/** /**
* csum_partial_copy_nocheck - Copy and checksum. * csum_partial_copy_nocheck - Copy and checksum.
* @src: source address * @src: source address
* @dst: destination address * @dst: destination address
* @len: number of bytes to be copied. * @len: number of bytes to be copied.
* @isum: initial sum that is added into the result (32bit unfolded) * @isum: initial sum that is added into the result (32bit unfolded)
* *
* Returns an 32bit unfolded checksum of the buffer. * Returns an 32bit unfolded checksum of the buffer.
*/ */
__wsum __wsum
csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum) csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
{ {
return csum_partial_copy_generic(src,dst,len,sum,NULL,NULL); return csum_partial_copy_generic(src, dst, len, sum, NULL, NULL);
} }
EXPORT_SYMBOL(csum_partial_copy_nocheck); EXPORT_SYMBOL(csum_partial_copy_nocheck);
__sum16 csum_ipv6_magic(const struct in6_addr *saddr, __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
...@@ -119,16 +119,16 @@ __sum16 csum_ipv6_magic(const struct in6_addr *saddr, ...@@ -119,16 +119,16 @@ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
__u32 len, unsigned short proto, __wsum sum) __u32 len, unsigned short proto, __wsum sum)
{ {
__u64 rest, sum64; __u64 rest, sum64;
rest = (__force __u64)htonl(len) + (__force __u64)htons(proto) + rest = (__force __u64)htonl(len) + (__force __u64)htons(proto) +
(__force __u64)sum; (__force __u64)sum;
asm(" addq (%[saddr]),%[sum]\n" asm(" addq (%[saddr]),%[sum]\n"
" adcq 8(%[saddr]),%[sum]\n" " adcq 8(%[saddr]),%[sum]\n"
" adcq (%[daddr]),%[sum]\n" " adcq (%[daddr]),%[sum]\n"
" adcq 8(%[daddr]),%[sum]\n" " adcq 8(%[daddr]),%[sum]\n"
" adcq $0,%[sum]\n" " adcq $0,%[sum]\n"
: [sum] "=r" (sum64) : [sum] "=r" (sum64)
: "[sum]" (rest),[saddr] "r" (saddr), [daddr] "r" (daddr)); : "[sum]" (rest), [saddr] "r" (saddr), [daddr] "r" (daddr));
return csum_fold((__force __wsum)add32_with_carry(sum64 & 0xffffffff, sum64>>32)); return csum_fold((__force __wsum)add32_with_carry(sum64 & 0xffffffff, sum64>>32));
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment