Commit 2fc83c2c authored by Rob Herring, committed by Arnaldo Carvalho de Melo

tools include: Add an initial math64.h

Add an initial math64.h similar to linux/math64.h with functions
mul_u64_u64_div64() and mul_u64_u32_shr(). This isn't a direct copy of
include/linux/math64.h as that doesn't define mul_u64_u64_div64().

Implementation was written by Peter Zijlstra based on linux/math64.h
and div64.h[1]. The original implementation was not optimal on arm64
because the __int128 division compiles to a call out to __udivti3, so I
dropped the __int128 variant of mul_u64_u64_div64().
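
For reference, the dropped variant would have looked roughly like the sketch
below (a reconstruction for illustration, not code from this patch); the
__int128 division is what the compiler lowers to a __udivti3 call on arm64:

static inline u64 mul_u64_u64_div64(u64 a, u64 b, u64 c)
{
	unsigned __int128 m = (unsigned __int128)a * b;

	/* 128-bit divide; no single instruction on arm64, becomes __udivti3 */
	return (u64)(m / c);
}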

[1] https://lore.kernel.org/lkml/20200322101848.GF2452@worktop.programming.kicks-ass.net/

Signed-off-by: Rob Herring <robh@kernel.org>
Acked-by: Jiri Olsa <jolsa@redhat.com>
Acked-by: Namhyung Kim <namhyung@kernel.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Itaru Kitayama <itaru.kitayama@gmail.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Will Deacon <will@kernel.org>
Link: http://lore.kernel.org/lkml/20210414155412.3697605-2-robh@kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
parent 2e1daee1
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MATH64_H
#define _LINUX_MATH64_H

#include <linux/types.h>

#ifdef __x86_64__
/* 64x64 -> 128-bit multiply in rdx:rax, then 128/64 divide, in one go. */
static inline u64 mul_u64_u64_div64(u64 a, u64 b, u64 c)
{
	u64 q;

	asm ("mulq %2; divq %3" : "=a" (q)
				: "a" (a), "rm" (b), "rm" (c)
				: "rdx");

	return q;
}
#define mul_u64_u64_div64 mul_u64_u64_div64
#endif

#ifdef __SIZEOF_INT128__
/* With a native 128-bit type, widen, multiply and shift directly. */
static inline u64 mul_u64_u32_shr(u64 a, u32 b, unsigned int shift)
{
	return (u64)(((unsigned __int128)a * b) >> shift);
}

#else

#ifdef __i386__
static inline u64 mul_u32_u32(u32 a, u32 b)
{
	u32 high, low;

	asm ("mull %[b]" : "=a" (low), "=d" (high)
			 : [a] "a" (a), [b] "rm" (b) );

	return low | ((u64)high) << 32;
}
#else
static inline u64 mul_u32_u32(u32 a, u32 b)
{
	return (u64)a * b;
}
#endif

/* Split 'a' into 32-bit halves and combine the shifted partial products. */
static inline u64 mul_u64_u32_shr(u64 a, u32 b, unsigned int shift)
{
	u32 ah, al;
	u64 ret;

	al = a;
	ah = a >> 32;

	ret = mul_u32_u32(al, b) >> shift;
	if (ah)
		ret += mul_u32_u32(ah, b) << (32 - shift);

	return ret;
}

#endif	/* __SIZEOF_INT128__ */

#ifndef mul_u64_u64_div64
/* Generic fallback: a * b / c == (a / c) * b + ((a % c) * b) / c. */
static inline u64 mul_u64_u64_div64(u64 a, u64 b, u64 c)
{
	u64 quot, rem;

	quot = a / c;
	rem = a % c;

	return quot * b + (rem * b) / c;
}
#endif

#endif /* _LINUX_MATH64_H */
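
A usage sketch, not part of this commit: the two helpers map onto common perf
conversions. The function names and parameters below are hypothetical
illustrations, assuming a mult/shift timebase and the usual
count * time_enabled / time_running scaling for multiplexed events.

#include <linux/types.h>
#include <linux/math64.h>

/* Convert a raw cycle count to nanoseconds with a mult/shift timebase. */
static u64 cycles_to_ns(u64 cycles, u32 mult, unsigned int shift)
{
	return mul_u64_u32_shr(cycles, mult, shift);
}

/* Scale a partially-counted event: count * time_enabled / time_running. */
static u64 scale_count(u64 count, u64 time_enabled, u64 time_running)
{
	return mul_u64_u64_div64(count, time_enabled, time_running);
}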