#ifdef __KERNEL__
#ifndef _ASM_M32R_CHECKSUM_H
#define _ASM_M32R_CHECKSUM_H

/*
 * include/asm-m32r/checksum.h
 *
 * IP/TCP/UDP checksum routines
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Some code taken from mips and parisc architecture.
 *
 *    Copyright (C) 2001, 2002  Hiroyuki Kondo, Hirokazu Takata
 *    Copyright (C) 2004  Hirokazu Takata <takata at linux-m32r.org>
 */

/*
 * computes the checksum of a memory block at buff, length len,
 * and adds in "sum" (32-bit)
 *
 * returns a 32-bit number suitable for feeding into itself
 * or csum_tcpudp_magic
 *
 * this function must be called with even lengths, except
 * for the last fragment, which may be odd
 *
 * it's best to have buff aligned on a 32-bit boundary
 */
32
asmlinkage unsigned int csum_partial(const unsigned char *buff, int len, unsigned int sum);
Hirokazu Takata's avatar
Hirokazu Takata committed
33 34

/*
 * The same as csum_partial, but copies from src while it checksums.
 *
 * Here even more important to align src and dst on a 32-bit (or even
 * better 64-bit) boundary
 */
extern unsigned int csum_partial_copy_nocheck(const char *src, char *dst,
                                              int len, unsigned int sum);

/*
44 45
 * This is a new version of the above that records errors it finds in *errp,
 * but continues and zeros thre rest of the buffer.
Hirokazu Takata's avatar
Hirokazu Takata committed
46
 */
47 48 49 50
extern unsigned int csum_partial_copy_from_user(const char __user *src,
                                                char *dst,
                                                int len, unsigned int sum,
                                                int *err_ptr);
Hirokazu Takata's avatar
Hirokazu Takata committed
51 52 53 54 55

/*
 *	Fold a partial checksum
 *
 *	Reduces a 32-bit partial checksum to 16 bits (adding the two
 *	halves with end-around carry) and returns its one's complement,
 *	ready for use as the on-the-wire checksum field.
 */

static inline unsigned int csum_fold(unsigned int sum)
{
	unsigned long tmpreg;
	__asm__(
		"	sll3	%1, %0, #16 \n"	/* tmpreg = sum << 16 */
		"	cmp	%0, %0 \n"	/* clear the carry (cbit) */
		"	addx	%0, %1 \n"	/* sum += tmpreg, carry into cbit */
		"	ldi	%1, #0 \n"
		"	srli	%0, #16 \n"	/* keep the folded high half */
		"	addx	%0, %1 \n"	/* add back the end-around carry */
		"	xor3	%0, %0, #0x0000ffff \n"	/* one's complement of low 16 bits */
		: "=r" (sum), "=&r" (tmpreg)
		: "0"  (sum)
		: "cbit"
	);
	return sum;
}

/*
 * This is a version of ip_compute_csum() optimized for IP headers,
 * which always checksum on 4 octet boundaries.
 *
 * @iph: pointer to the IP header (must be 32-bit aligned —
 *       loaded with word loads below)
 * @ihl: header length in 32-bit words (>= 5 for a valid IPv4 header)
 *
 * Sums the header one word at a time with add-with-carry (addx),
 * then folds and complements the result via csum_fold().
 */
static inline unsigned short ip_fast_csum(unsigned char * iph,
					  unsigned int ihl) {
	unsigned long sum, tmpreg0, tmpreg1;

	__asm__ __volatile__(
		"	ld	%0, @%1+ \n"	/* first word of the header */
		"	addi	%2, #-4 \n"
		"#	bgez	%2, 2f \n"
		"	cmp	%0, %0 \n"	/* clear the carry (cbit) */
		"	ld	%3, @%1+ \n"	/* words 2-4, summed with carry */
		"	ld	%4, @%1+ \n"
		"	addx	%0, %3 \n"
		"	ld	%3, @%1+ \n"
		"	addx	%0, %4 \n"
		"	addx	%0, %3 \n"
		"	.fillinsn\n"
		"1: \n"			/* loop over any words beyond 5 (IP options) */
		"	ld	%4, @%1+ \n"
		"	addi	%2, #-1 \n"
		"	addx	%0, %4 \n"
		"	bgtz	%2, 1b \n"
		"\n"
		"	ldi	%3, #0 \n"
		"	addx	%0, %3 \n"	/* absorb the final carry */
		"	.fillinsn\n"
		"2: \n"
	/* Since the input registers which are loaded with iph and ipl
	   are modified, we must also specify them as outputs, or gcc
	   will assume they contain their original values. */
	: "=&r" (sum), "=r" (iph), "=r" (ihl), "=&r" (tmpreg0), "=&r" (tmpreg1)
	: "1" (iph), "2" (ihl)
	: "cbit", "memory");

	return csum_fold(sum);
}

/*
 * Sums the TCP/UDP pseudo-header fields (source address, destination
 * address, protocol and length) into "sum" without the final fold —
 * the result is suitable for feeding into csum_fold() or csum_partial().
 */
static inline unsigned long csum_tcpudp_nofold(unsigned long saddr,
					       unsigned long daddr,
					       unsigned short len,
					       unsigned short proto,
					       unsigned int sum)
{
	/* Pack {len, proto} into one word laid out as in the pseudo-header;
	 * the byte order differs per endianness so the word-wise
	 * add-with-carry below sees the same octets either way. */
#if defined(__LITTLE_ENDIAN)
	unsigned long len_proto = (ntohs(len)<<16)+proto*256;
#else
	unsigned long len_proto = (proto<<16)+len;
#endif
	unsigned long tmpreg;

	__asm__(
		"	cmp	%0, %0 \n"	/* clear the carry (cbit) */
		"	addx	%0, %2 \n"	/* sum += daddr */
		"	addx	%0, %3 \n"	/* sum += saddr */
		"	addx	%0, %4 \n"	/* sum += len/proto word */
		"	ldi	%1, #0 \n"
		"	addx	%0, %1 \n"	/* absorb the final carry */
		: "=r" (sum), "=&r" (tmpreg)
		: "r" (daddr), "r" (saddr), "r" (len_proto), "0" (sum)
		: "cbit"
	);

	return sum;
}

/*
 * computes the checksum of the TCP/UDP pseudo-header
 * returns a 16-bit checksum, already complemented
 */
static inline unsigned short int csum_tcpudp_magic(unsigned long saddr,
						   unsigned long daddr,
						   unsigned short len,
						   unsigned short proto,
						   unsigned int sum)
{
	/* Accumulate the pseudo-header, then fold and complement. */
	return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
}

/*
 * this routine is used for miscellaneous IP-like checksums, mainly
 * in icmp.c
 *
 * Computes the 16-bit one's-complement checksum of an arbitrary
 * buffer: accumulate with csum_partial(), then fold and complement.
 */

static inline unsigned short ip_compute_csum(unsigned char * buff, int len) {
	return csum_fold (csum_partial(buff, len, 0));
}

#define _HAVE_ARCH_IPV6_CSUM
165 166 167 168 169
static inline unsigned short int csum_ipv6_magic(struct in6_addr *saddr,
						 struct in6_addr *daddr,
						 __u16 len,
						 unsigned short proto,
						 unsigned int sum)
Hirokazu Takata's avatar
Hirokazu Takata committed
170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203
{
	unsigned long tmpreg0, tmpreg1, tmpreg2, tmpreg3;
	__asm__(
		"	ld	%1, @(%5) \n"
		"	ld	%2, @(4,%5) \n"
		"	ld	%3, @(8,%5) \n"
		"	ld	%4, @(12,%5) \n"
		"	add	%0, %1 \n"
		"	addx	%0, %2 \n"
		"	addx	%0, %3 \n"
		"	addx	%0, %4 \n"
		"	ld	%1, @(%6) \n"
		"	ld	%2, @(4,%6) \n"
		"	ld	%3, @(8,%6) \n"
		"	ld	%4, @(12,%6) \n"
		"	addx	%0, %1 \n"
		"	addx	%0, %2 \n"
		"	addx	%0, %3 \n"
		"	addx	%0, %4 \n"
		"	addx	%0, %7 \n"
		"	addx	%0, %8 \n"
		"	ldi	%1, #0 \n"
		"	addx	%0, %1 \n"
		: "=&r" (sum), "=&r" (tmpreg0), "=&r" (tmpreg1),
		  "=&r" (tmpreg2), "=&r" (tmpreg3)
		: "r" (saddr), "r" (daddr),
		  "r" (htonl((__u32) (len))), "r" (htonl(proto)), "0" (sum)
		: "cbit"
        );

	return csum_fold(sum);
}

#endif /* _ASM_M32R_CHECKSUM_H */
#endif /* __KERNEL__ */