Commit 9be259aa authored by Al Viro, committed by David S. Miller

[NET]: Alpha checksum annotations and cleanups.

* sanitize prototypes and annotate
* kill useless access_ok() in csum_partial_copy_from_user() (the only
caller checks it already).
* do_csum_partial_copy_from_user() is not needed now
* replace htons(len) with len << 8 - they are the same wrt checksums
on little-endian.
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 2bc35798
...@@ -41,28 +41,25 @@ static inline unsigned short from64to16(unsigned long x) ...@@ -41,28 +41,25 @@ static inline unsigned short from64to16(unsigned long x)
* computes the checksum of the TCP/UDP pseudo-header * computes the checksum of the TCP/UDP pseudo-header
* returns a 16-bit checksum, already complemented. * returns a 16-bit checksum, already complemented.
*/ */
unsigned short int csum_tcpudp_magic(unsigned long saddr, __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
unsigned long daddr,
unsigned short len, unsigned short len,
unsigned short proto, unsigned short proto,
unsigned int sum) __wsum sum)
{ {
return ~from64to16(saddr + daddr + sum + return (__force __sum16)~from64to16(
((unsigned long) ntohs(len) << 16) + (__force u64)saddr + (__force u64)daddr +
((unsigned long) proto << 8)); (__force u64)sum + ((len + proto) << 8));
} }
unsigned int csum_tcpudp_nofold(unsigned long saddr, __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
unsigned long daddr,
unsigned short len, unsigned short len,
unsigned short proto, unsigned short proto,
unsigned int sum) __wsum sum)
{ {
unsigned long result; unsigned long result;
result = (saddr + daddr + sum + result = (__force u64)saddr + (__force u64)daddr +
((unsigned long) ntohs(len) << 16) + (__force u64)sum + ((len + proto) << 8);
((unsigned long) proto << 8));
/* Fold down to 32-bits so we don't lose in the typedef-less /* Fold down to 32-bits so we don't lose in the typedef-less
network stack. */ network stack. */
...@@ -70,7 +67,7 @@ unsigned int csum_tcpudp_nofold(unsigned long saddr, ...@@ -70,7 +67,7 @@ unsigned int csum_tcpudp_nofold(unsigned long saddr,
result = (result & 0xffffffff) + (result >> 32); result = (result & 0xffffffff) + (result >> 32);
/* 33 to 32 */ /* 33 to 32 */
result = (result & 0xffffffff) + (result >> 32); result = (result & 0xffffffff) + (result >> 32);
return result; return (__force __wsum)result;
} }
/* /*
...@@ -146,9 +143,9 @@ static inline unsigned long do_csum(const unsigned char * buff, int len) ...@@ -146,9 +143,9 @@ static inline unsigned long do_csum(const unsigned char * buff, int len)
* This is a version of ip_compute_csum() optimized for IP headers, * This is a version of ip_compute_csum() optimized for IP headers,
* which always checksum on 4 octet boundaries. * which always checksum on 4 octet boundaries.
*/ */
unsigned short ip_fast_csum(unsigned char * iph, unsigned int ihl) __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{ {
return ~do_csum(iph,ihl*4); return (__force __sum16)~do_csum(iph,ihl*4);
} }
/* /*
...@@ -163,15 +160,15 @@ unsigned short ip_fast_csum(unsigned char * iph, unsigned int ihl) ...@@ -163,15 +160,15 @@ unsigned short ip_fast_csum(unsigned char * iph, unsigned int ihl)
* *
* it's best to have buff aligned on a 32-bit boundary * it's best to have buff aligned on a 32-bit boundary
*/ */
/*
 * Compute a 32-bit partial checksum over the memory block at buff,
 * of length len, folding in a previous partial checksum (sum).
 *
 * It's best to have buff aligned on a 32-bit boundary.
 */
__wsum csum_partial(const void *buff, int len, __wsum sum)
{
	unsigned long result = do_csum(buff, len);

	/* Add in the old sum, then fold the carry:
	   32+c bits -> 32 bits. */
	result += (__force u32)sum;
	result = (result & 0xffffffff) + (result >> 32);

	return (__force __wsum)result;
}

EXPORT_SYMBOL(csum_partial);
...@@ -180,7 +177,7 @@ EXPORT_SYMBOL(csum_partial); ...@@ -180,7 +177,7 @@ EXPORT_SYMBOL(csum_partial);
* this routine is used for miscellaneous IP-like checksums, mainly * this routine is used for miscellaneous IP-like checksums, mainly
* in icmp.c * in icmp.c
*/ */
unsigned short ip_compute_csum(unsigned char * buff, int len) __sum16 ip_compute_csum(const void *buff, int len)
{ {
return ~from64to16(do_csum(buff,len)); return (__force __sum16)~from64to16(do_csum(buff,len));
} }
...@@ -329,11 +329,11 @@ csum_partial_cfu_unaligned(const unsigned long __user * src, ...@@ -329,11 +329,11 @@ csum_partial_cfu_unaligned(const unsigned long __user * src,
return checksum; return checksum;
} }
static unsigned int __wsum
do_csum_partial_copy_from_user(const char __user *src, char *dst, int len, csum_partial_copy_from_user(const void __user *src, void *dst, int len,
unsigned int sum, int *errp) __wsum sum, int *errp)
{ {
unsigned long checksum = (unsigned) sum; unsigned long checksum = (__force u32) sum;
unsigned long soff = 7 & (unsigned long) src; unsigned long soff = 7 & (unsigned long) src;
unsigned long doff = 7 & (unsigned long) dst; unsigned long doff = 7 & (unsigned long) dst;
...@@ -367,25 +367,12 @@ do_csum_partial_copy_from_user(const char __user *src, char *dst, int len, ...@@ -367,25 +367,12 @@ do_csum_partial_copy_from_user(const char __user *src, char *dst, int len,
} }
checksum = from64to16 (checksum); checksum = from64to16 (checksum);
} }
return checksum; return (__force __wsum)checksum;
}
unsigned int
csum_partial_copy_from_user(const char __user *src, char *dst, int len,
unsigned int sum, int *errp)
{
if (!access_ok(VERIFY_READ, src, len)) {
*errp = -EFAULT;
memset(dst, 0, len);
return sum;
}
return do_csum_partial_copy_from_user(src, dst, len, sum, errp);
} }
unsigned int __wsum
csum_partial_copy_nocheck(const char __user *src, char *dst, int len, csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
unsigned int sum)
{ {
return do_csum_partial_copy_from_user(src, dst, len, sum, NULL); return csum_partial_copy_from_user((__force const void __user *)src,
dst, len, sum, NULL);
} }
...@@ -7,21 +7,20 @@ ...@@ -7,21 +7,20 @@
* This is a version of ip_compute_csum() optimized for IP headers, * This is a version of ip_compute_csum() optimized for IP headers,
* which always checksum on 4 octet boundaries. * which always checksum on 4 octet boundaries.
*/ */
extern unsigned short ip_fast_csum(unsigned char * iph, unsigned int ihl); extern __sum16 ip_fast_csum(const void *iph, unsigned int ihl);
/* /*
* computes the checksum of the TCP/UDP pseudo-header * computes the checksum of the TCP/UDP pseudo-header
* returns a 16-bit checksum, already complemented * returns a 16-bit checksum, already complemented
*/ */
extern unsigned short int csum_tcpudp_magic(unsigned long saddr, extern __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
unsigned long daddr,
unsigned short len, unsigned short len,
unsigned short proto, unsigned short proto,
unsigned int sum); __wsum sum);
unsigned int csum_tcpudp_nofold(unsigned long saddr, unsigned long daddr, __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
unsigned short len, unsigned short proto, unsigned short len, unsigned short proto,
unsigned int sum); __wsum sum);
/* /*
* computes the checksum of a memory block at buff, length len, * computes the checksum of a memory block at buff, length len,
...@@ -35,7 +34,7 @@ unsigned int csum_tcpudp_nofold(unsigned long saddr, unsigned long daddr, ...@@ -35,7 +34,7 @@ unsigned int csum_tcpudp_nofold(unsigned long saddr, unsigned long daddr,
* *
* it's best to have buff aligned on a 32-bit boundary * it's best to have buff aligned on a 32-bit boundary
*/ */
extern unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum); extern __wsum csum_partial(const void *buff, int len, __wsum sum);
/* /*
* the same as csum_partial, but copies from src while it * the same as csum_partial, but copies from src while it
...@@ -44,9 +43,9 @@ extern unsigned int csum_partial(const unsigned char * buff, int len, unsigned i ...@@ -44,9 +43,9 @@ extern unsigned int csum_partial(const unsigned char * buff, int len, unsigned i
* here even more important to align src and dst on a 32-bit (or even * here even more important to align src and dst on a 32-bit (or even
* better 64-bit) boundary * better 64-bit) boundary
*/ */
unsigned int csum_partial_copy_from_user(const char __user *src, char *dst, int len, unsigned int sum, int *errp); __wsum csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *errp);
unsigned int csum_partial_copy_nocheck(const char *src, char *dst, int len, unsigned int sum); __wsum csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum);
/* /*
...@@ -54,24 +53,23 @@ unsigned int csum_partial_copy_nocheck(const char *src, char *dst, int len, unsi ...@@ -54,24 +53,23 @@ unsigned int csum_partial_copy_nocheck(const char *src, char *dst, int len, unsi
* in icmp.c * in icmp.c
*/ */
extern unsigned short ip_compute_csum(unsigned char * buff, int len); extern __sum16 ip_compute_csum(const void *buff, int len);
/* /*
* Fold a partial checksum without adding pseudo headers * Fold a partial checksum without adding pseudo headers
*/ */
/*
 * Fold a 32-bit partial checksum without adding pseudo headers:
 * two 16-bit folds absorb any carry, then complement.
 */
static inline __sum16 csum_fold(__wsum csum)
{
	u32 s = (__force u32)csum;

	s = (s & 0xffff) + (s >> 16);	/* 32 -> 17 bits */
	s = (s & 0xffff) + (s >> 16);	/* 17 -> 16 bits */
	return (__force __sum16)~s;
}
#define _HAVE_ARCH_IPV6_CSUM #define _HAVE_ARCH_IPV6_CSUM
extern unsigned short int csum_ipv6_magic(struct in6_addr *saddr, extern __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
struct in6_addr *daddr, const struct in6_addr *daddr,
__u32 len, __u32 len, unsigned short proto,
unsigned short proto, __wsum sum);
unsigned int sum);
#endif #endif
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment