Commit 34115065 authored by Eric Dumazet, committed by Borislav Petkov

x86/csum: Rewrite/optimize csum_partial()

With more NICs supporting CHECKSUM_COMPLETE, and IPv6 being widely
used, csum_partial() is heavily used on small numbers of bytes, and is
consuming many cycles.

IPv6 header size, for instance, is 40 bytes.

Another thing to consider is that NET_IP_ALIGN is 0 on x86, meaning
that network headers are not word-aligned, unless the driver forces
this.

This means that csum_partial() fetches one u16 to 'align the buffer',
then performs three u64 additions with carry in a loop, then a
remaining u32, then a remaining u16.

With this new version, csum_partial() loops only over the 64-byte
blocks; the remaining bytes are then handled by bisection, as sketched
below.
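For illustration only, the bisection can be pictured with the following
user-space C sketch. This is not the kernel code from the patch (which
uses addq/adcq inline asm and load_unaligned_zeropad()); add64(),
load_partial() and csum_tail() are made-up helper names, and only the
tail after the 64-byte loop is shown:

	#include <stdint.h>
	#include <string.h>

	/* C analogue of "addq ...; adcq $0": 64-bit add, end-around carry */
	static uint64_t add64(uint64_t sum, uint64_t v)
	{
		sum += v;
		return sum + (sum < v);	/* fold the carry back in */
	}

	/* Load n (1..8) bytes, zero padded, roughly what the patch does
	 * with load_unaligned_zeropad() plus a shift for the last bytes */
	static uint64_t load_partial(const uint8_t *p, unsigned int n)
	{
		uint64_t v = 0;

		memcpy(&v, p, n);	/* little-endian partial load */
		return v;
	}

	/* Bisected tail for a 0..63 byte remainder: each power of two in
	 * len is tested exactly once, so there is no data-dependent loop */
	static uint64_t csum_tail(uint64_t sum, const uint8_t *p,
				  unsigned int len)
	{
		if (len & 32) {
			for (int i = 0; i < 4; i++)
				sum = add64(sum, load_partial(p + 8 * i, 8));
			p += 32;
		}
		if (len & 16) {
			sum = add64(sum, load_partial(p, 8));
			sum = add64(sum, load_partial(p + 8, 8));
			p += 16;
		}
		if (len & 8) {
			sum = add64(sum, load_partial(p, 8));
			p += 8;
		}
		if (len & 7)
			sum = add64(sum, load_partial(p, len & 7));
		return sum;
	}

The real patch chains the adds with adcq and a single trailing
"adcq $0" instead of folding the carry after every word, and handles
the final 1..7 bytes with one masked load; the result is the same
ones'-complement accumulation.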

Tests on various CPUs all show a big reduction in csum_partial() cost
(by 50 to 80%).

Before:
	4.16%  [kernel]       [k] csum_partial
After:
	0.83%  [kernel]       [k] csum_partial

If run in a loop 1,000,000 times:

Before:
	26,922,913      cycles                    # 3846130.429 GHz
	80,302,961      instructions              #    2.98  insn per cycle
	21,059,816      branches                  # 3008545142.857 M/sec
	     2,896      branch-misses             #    0.01% of all branches
After:
	17,960,709      cycles                    # 3592141.800 GHz
	41,292,805      instructions              #    2.30  insn per cycle
	11,058,119      branches                  # 2211623800.000 M/sec
	     2,997      branch-misses             #    0.03% of all branches

 [ bp: Massage, merge in subsequent fixes into a single patch:
   - um compilation error due to missing load_unaligned_zeropad():
	- Reported-by: kernel test robot <lkp@intel.com>
	- Link: https://lkml.kernel.org/r/20211118175239.1525650-1-eric.dumazet@gmail.com
   - Fix initial seed for odd buffers
	- Reported-by: Noah Goldstein <goldstein.w.n@gmail.com>
	- Link: https://lkml.kernel.org/r/20211125141817.3541501-1-eric.dumazet@gmail.com
  ]
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Alexander Duyck <alexanderduyck@fb.com>
Link: https://lore.kernel.org/r/20211112161950.528886-1-eric.dumazet@gmail.com
parent 0fcfb00b
--- a/arch/x86/lib/csum-partial_64.c
+++ b/arch/x86/lib/csum-partial_64.c
@@ -9,6 +9,7 @@
 #include <linux/compiler.h>
 #include <linux/export.h>
 #include <asm/checksum.h>
+#include <asm/word-at-a-time.h>
 
 static inline unsigned short from32to16(unsigned a)
 {
@@ -21,120 +22,119 @@ static inline unsigned short from32to16(unsigned a)
 }
 
 /*
- * Do a 64-bit checksum on an arbitrary memory area.
+ * Do a checksum on an arbitrary memory area.
  * Returns a 32bit checksum.
  *
  * This isn't as time critical as it used to be because many NICs
  * do hardware checksumming these days.
  *
- * Things tried and found to not make it faster:
- * Manual Prefetching
- * Unrolling to an 128 bytes inner loop.
- * Using interleaving with more registers to break the carry chains.
+ * Still, with CHECKSUM_COMPLETE this is called to compute
+ * checksums on IPv6 headers (40 bytes) and other small parts.
+ * it's best to have buff aligned on a 64-bit boundary
  */
-static unsigned do_csum(const unsigned char *buff, unsigned len)
+__wsum csum_partial(const void *buff, int len, __wsum sum)
 {
-	unsigned odd, count;
-	unsigned long result = 0;
+	u64 temp64 = (__force u64)sum;
+	unsigned odd, result;
 
-	if (unlikely(len == 0))
-		return result;
 	odd = 1 & (unsigned long) buff;
 	if (unlikely(odd)) {
-		result = *buff << 8;
+		if (unlikely(len == 0))
+			return sum;
+		temp64 = ror32((__force u32)sum, 8);
+		temp64 += (*(unsigned char *)buff << 8);
 		len--;
 		buff++;
 	}
-	count = len >> 1;		/* nr of 16-bit words.. */
-	if (count) {
-		if (2 & (unsigned long) buff) {
-			result += *(unsigned short *)buff;
-			count--;
-			len -= 2;
-			buff += 2;
-		}
-		count >>= 1;		/* nr of 32-bit words.. */
-		if (count) {
-			unsigned long zero;
-			unsigned count64;
-			if (4 & (unsigned long) buff) {
-				result += *(unsigned int *) buff;
-				count--;
-				len -= 4;
-				buff += 4;
-			}
-			count >>= 1;	/* nr of 64-bit words.. */
 
-			/* main loop using 64byte blocks */
-			zero = 0;
-			count64 = count >> 3;
-			while (count64) {
-				asm("addq 0*8(%[src]),%[res]\n\t"
-				    "adcq 1*8(%[src]),%[res]\n\t"
-				    "adcq 2*8(%[src]),%[res]\n\t"
-				    "adcq 3*8(%[src]),%[res]\n\t"
-				    "adcq 4*8(%[src]),%[res]\n\t"
-				    "adcq 5*8(%[src]),%[res]\n\t"
-				    "adcq 6*8(%[src]),%[res]\n\t"
-				    "adcq 7*8(%[src]),%[res]\n\t"
-				    "adcq %[zero],%[res]"
-				    : [res] "=r" (result)
-				    : [src] "r" (buff), [zero] "r" (zero),
-				    "[res]" (result));
-				buff += 64;
-				count64--;
-			}
+	while (unlikely(len >= 64)) {
+		asm("addq 0*8(%[src]),%[res]\n\t"
+		    "adcq 1*8(%[src]),%[res]\n\t"
+		    "adcq 2*8(%[src]),%[res]\n\t"
+		    "adcq 3*8(%[src]),%[res]\n\t"
+		    "adcq 4*8(%[src]),%[res]\n\t"
+		    "adcq 5*8(%[src]),%[res]\n\t"
+		    "adcq 6*8(%[src]),%[res]\n\t"
+		    "adcq 7*8(%[src]),%[res]\n\t"
+		    "adcq $0,%[res]"
+		    : [res] "+r" (temp64)
+		    : [src] "r" (buff)
+		    : "memory");
+		buff += 64;
+		len -= 64;
+	}
 
-			/* last up to 7 8byte blocks */
-			count %= 8;
-			while (count) {
-				asm("addq %1,%0\n\t"
-				    "adcq %2,%0\n"
-				    : "=r" (result)
-				    : "m" (*(unsigned long *)buff),
-				    "r" (zero), "0" (result));
-				--count;
-				buff += 8;
-			}
-			result = add32_with_carry(result>>32,
-						  result&0xffffffff);
-
-			if (len & 4) {
-				result += *(unsigned int *) buff;
-				buff += 4;
-			}
-		}
-		if (len & 2) {
-			result += *(unsigned short *) buff;
-			buff += 2;
-		}
+	if (len & 32) {
+		asm("addq 0*8(%[src]),%[res]\n\t"
+		    "adcq 1*8(%[src]),%[res]\n\t"
+		    "adcq 2*8(%[src]),%[res]\n\t"
+		    "adcq 3*8(%[src]),%[res]\n\t"
+		    "adcq $0,%[res]"
+		    : [res] "+r" (temp64)
+		    : [src] "r" (buff)
+		    : "memory");
+		buff += 32;
+	}
+	if (len & 16) {
+		asm("addq 0*8(%[src]),%[res]\n\t"
+		    "adcq 1*8(%[src]),%[res]\n\t"
+		    "adcq $0,%[res]"
+		    : [res] "+r" (temp64)
+		    : [src] "r" (buff)
+		    : "memory");
+		buff += 16;
+	}
+	if (len & 8) {
+		asm("addq 0*8(%[src]),%[res]\n\t"
+		    "adcq $0,%[res]"
+		    : [res] "+r" (temp64)
+		    : [src] "r" (buff)
+		    : "memory");
+		buff += 8;
+	}
+	if (len & 7) {
+#ifdef CONFIG_DCACHE_WORD_ACCESS
+		unsigned int shift = (8 - (len & 7)) * 8;
+		unsigned long trail;
+
+		trail = (load_unaligned_zeropad(buff) << shift) >> shift;
+
+		asm("addq %[trail],%[res]\n\t"
+		    "adcq $0,%[res]"
+		    : [res] "+r" (temp64)
+		    : [trail] "r" (trail));
+#else
+		if (len & 4) {
+			asm("addq %[val],%[res]\n\t"
+			    "adcq $0,%[res]"
+			    : [res] "+r" (temp64)
+			    : [val] "r" ((u64)*(u32 *)buff)
+			    : "memory");
+			buff += 4;
+		}
+		if (len & 2) {
+			asm("addq %[val],%[res]\n\t"
+			    "adcq $0,%[res]"
+			    : [res] "+r" (temp64)
+			    : [val] "r" ((u64)*(u16 *)buff)
+			    : "memory");
+			buff += 2;
+		}
+		if (len & 1) {
+			asm("addq %[val],%[res]\n\t"
+			    "adcq $0,%[res]"
+			    : [res] "+r" (temp64)
+			    : [val] "r" ((u64)*(u8 *)buff)
+			    : "memory");
+		}
+#endif
 	}
-	if (len & 1)
-		result += *buff;
-	result = add32_with_carry(result>>32, result & 0xffffffff);
+	result = add32_with_carry(temp64 >> 32, temp64 & 0xffffffff);
 	if (unlikely(odd)) {
 		result = from32to16(result);
 		result = ((result >> 8) & 0xff) | ((result & 0xff) << 8);
 	}
-	return result;
-}
-
-/*
- * computes the checksum of a memory block at buff, length len,
- * and adds in "sum" (32-bit)
- *
- * returns a 32-bit number suitable for feeding into itself
- * or csum_tcpudp_magic
- *
- * this function must be called with even lengths, except
- * for the last fragment, which may be odd
- *
- * it's best to have buff aligned on a 64-bit boundary
- */
-__wsum csum_partial(const void *buff, int len, __wsum sum)
-{
-	return (__force __wsum)add32_with_carry(do_csum(buff, len),
-						(__force u32)sum);
+	return (__force __wsum)result;
 }
 EXPORT_SYMBOL(csum_partial);
@@ -147,4 +147,3 @@ __sum16 ip_compute_csum(const void *buff, int len)
 	return csum_fold(csum_partial(buff,len,0));
 }
 EXPORT_SYMBOL(ip_compute_csum);
-