Commit a1ae4317 authored by Christophe Leroy, committed by Michael Ellerman

powerpc: Use rol32() instead of opencoding in csum_fold()

rol32(x, 16) will do the rotate using rlwinm.

No need to open code it using inline assembly.
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/794337eff7bb803d2c4e67d9eee635390c4c48fe.1646812553.git.christophe.leroy@csgroup.eu
parent e6f6390a
@@ -38,14 +38,15 @@ extern __wsum csum_and_copy_to_user(const void *src, void __user *dst,
  */
 static inline __sum16 csum_fold(__wsum sum)
 {
-	unsigned int tmp;
+	u32 tmp = (__force u32)sum;
 
-	/* swap the two 16-bit halves of sum */
-	__asm__("rlwinm %0,%1,16,0,31" : "=r" (tmp) : "r" (sum));
-	/* if there is a carry from adding the two 16-bit halves,
-	   it will carry from the lower half into the upper half,
-	   giving us the correct sum in the upper half. */
-	return (__force __sum16)(~((__force u32)sum + tmp) >> 16);
+	/*
+	 * swap the two 16-bit halves of sum
+	 * if there is a carry from adding the two 16-bit halves,
+	 * it will carry from the lower half into the upper half,
+	 * giving us the correct sum in the upper half.
+	 */
+	return (__force __sum16)(~(tmp + rol32(tmp, 16)) >> 16);
 }
 
 static inline u32 from64to32(u64 x)
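Editor's note: as a rough illustration of what the new expression computes, here is a minimal userspace sketch of the same fold. It is not part of the commit; rol32() below is a local stand-in for the kernel helper, and fold_ref() is a hypothetical reference written only to compare against the classic end-around-carry formulation.

#include <stdint.h>
#include <stdio.h>

/* Local stand-in for the kernel's rol32(): rotate a 32-bit value left by n bits. */
static inline uint32_t rol32(uint32_t x, unsigned int n)
{
	return (x << n) | (x >> (32 - n));
}

/* Same expression as the new csum_fold(): rotating by 16 lines the two
 * 16-bit halves up against each other, so adding sum to its rotation
 * leaves the halves' sum (with any carry propagated) in the upper 16 bits;
 * complement and shift down to get the folded checksum. */
static inline uint16_t fold(uint32_t sum)
{
	return (uint16_t)(~(sum + rol32(sum, 16)) >> 16);
}

/* Hypothetical reference fold using an explicit end-around carry. */
static inline uint16_t fold_ref(uint32_t sum)
{
	sum = (sum & 0xffff) + (sum >> 16);
	sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

int main(void)
{
	uint32_t samples[] = { 0x00000000, 0x0000ffff, 0xffff0001, 0x12345678, 0xfffffffe };

	for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("0x%08x -> 0x%04x (reference 0x%04x)\n",
		       samples[i], (unsigned)fold(samples[i]), (unsigned)fold_ref(samples[i]));
	return 0;
}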