Commit 98ad45fb authored by Heiko Carstens's avatar Heiko Carstens Committed by Vasily Gorbik

s390/checksum: coding style changes

Add some coding style changes which hopefully make the code
look a bit less odd.
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
parent 612ad078
...@@ -16,19 +16,18 @@ ...@@ -16,19 +16,18 @@
#include <linux/in6.h> #include <linux/in6.h>
/* /*
* computes the checksum of a memory block at buff, length len, * Computes the checksum of a memory block at buff, length len,
* and adds in "sum" (32-bit) * and adds in "sum" (32-bit).
* *
* returns a 32-bit number suitable for feeding into itself * Returns a 32-bit number suitable for feeding into itself
* or csum_tcpudp_magic * or csum_tcpudp_magic.
* *
* this function must be called with even lengths, except * This function must be called with even lengths, except
* for the last fragment, which may be odd * for the last fragment, which may be odd.
* *
* it's best to have buff aligned on a 32-bit boundary * It's best to have buff aligned on a 32-bit boundary.
*/ */
static inline __wsum static inline __wsum csum_partial(const void *buff, int len, __wsum sum)
csum_partial(const void *buff, int len, __wsum sum)
{ {
register unsigned long reg2 asm("2") = (unsigned long) buff; register unsigned long reg2 asm("2") = (unsigned long) buff;
register unsigned long reg3 asm("3") = (unsigned long) len; register unsigned long reg3 asm("3") = (unsigned long) len;
...@@ -40,15 +39,15 @@ csum_partial(const void *buff, int len, __wsum sum) ...@@ -40,15 +39,15 @@ csum_partial(const void *buff, int len, __wsum sum)
return sum; return sum;
} }
/*
 * Copy len bytes from src to dst, then return the checksum of the
 * copied data folded together with "sum" (no access checking is done).
 */
static inline __wsum csum_partial_copy_nocheck(const void *src, void *dst,
					       int len, __wsum sum)
{
	/* memcpy() returns dst, so checksum the freshly copied block. */
	void *copied = memcpy(dst, src, len);

	return csum_partial(copied, len, sum);
}
/* /*
* Fold a partial checksum without adding pseudo headers * Fold a partial checksum without adding pseudo headers.
*/ */
static inline __sum16 csum_fold(__wsum sum) static inline __sum16 csum_fold(__wsum sum)
{ {
...@@ -60,9 +59,8 @@ static inline __sum16 csum_fold(__wsum sum) ...@@ -60,9 +59,8 @@ static inline __sum16 csum_fold(__wsum sum)
} }
/* /*
* This is a version of ip_compute_csum() optimized for IP headers, * This is a version of ip_compute_csum() optimized for IP headers,
* which always checksum on 4 octet boundaries. * which always checksums on 4 octet boundaries.
*
*/ */
static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl) static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{ {
...@@ -81,8 +79,8 @@ static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl) ...@@ -81,8 +79,8 @@ static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
} }
/* /*
* computes the checksum of the TCP/UDP pseudo-header * Computes the checksum of the TCP/UDP pseudo-header.
* returns a 32-bit checksum * Returns a 32-bit checksum.
*/ */
static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, __u32 len, static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, __u32 len,
__u8 proto, __wsum sum) __u8 proto, __wsum sum)
...@@ -98,22 +96,18 @@ static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, __u32 len, ...@@ -98,22 +96,18 @@ static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, __u32 len,
} }
/* /*
* computes the checksum of the TCP/UDP pseudo-header * Computes the checksum of the TCP/UDP pseudo-header.
* returns a 16-bit checksum, already complemented * Returns a 16-bit checksum, already complemented.
*/ */
static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, __u32 len,
static inline __sum16 __u8 proto, __wsum sum)
csum_tcpudp_magic(__be32 saddr, __be32 daddr, __u32 len, __u8 proto,
__wsum sum)
{ {
return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum)); return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
} }
/* /*
* this routine is used for miscellaneous IP-like checksums, mainly * Used for miscellaneous IP-like checksums, mainly icmp.
* in icmp.c
*/ */
static inline __sum16 ip_compute_csum(const void *buff, int len) static inline __sum16 ip_compute_csum(const void *buff, int len)
{ {
return csum_fold(csum_partial(buff, len, 0)); return csum_fold(csum_partial(buff, len, 0));
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment