
Commit 1beb35e

ahunter6 authored and KAGA-KOKO committed
vdso, math64: Provide mul_u64_u32_add_u64_shr()
Provide mul_u64_u32_add_u64_shr() which is a calculation that will be used by timekeeping and VDSO.

Place #include <vdso/math64.h> after #include <asm/div64.h> to allow architecture-specific overrides, at least for the kernel.

Signed-off-by: Adrian Hunter <[email protected]>
Signed-off-by: Thomas Gleixner <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
1 parent 5e5e514 commit 1beb35e
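
For readers skimming the diffs below: the new helper computes (a * mul + b) >> shift with a full 128-bit intermediate, i.e. a 64x32-bit multiply plus a 64-bit addend, shifted down. A minimal userspace sketch of the intended semantics (the ref_ name is mine, not part of the commit), assuming a compiler with __int128:

#include <stdint.h>

/* Reference semantics of mul_u64_u32_add_u64_shr():
 * result = (a * mul + b) >> shift, computed in 128 bits so the
 * intermediate product cannot overflow.
 */
static inline uint64_t ref_mul_u64_u32_add_u64_shr(uint64_t a, uint32_t mul,
						   uint64_t b, unsigned int shift)
{
	return (uint64_t)(((unsigned __int128)a * mul + b) >> shift);
}

In timekeeping terms this is roughly the usual cycles-to-nanoseconds conversion, ns = (cycles * mult + frac) >> shift, with the 64-bit addend carrying the fractional part.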

File tree: 2 files changed, 39 additions & 1 deletion


include/linux/math64.h

Lines changed: 1 addition & 1 deletion
@@ -4,8 +4,8 @@
 
 #include <linux/types.h>
 #include <linux/math.h>
-#include <vdso/math64.h>
 #include <asm/div64.h>
+#include <vdso/math64.h>
 
 #if BITS_PER_LONG == 64
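
With <asm/div64.h> now included first, an architecture can supply its own mul_u64_u32_add_u64_shr() and the #ifndef guards added to <vdso/math64.h> below will then skip the generic versions. A hypothetical override sketch (arch/foo is made up; no in-tree architecture is implied):

/* arch/foo/include/asm/div64.h -- hypothetical override sketch */
static __always_inline u64 mul_u64_u32_add_u64_shr(u64 a, u32 mul, u64 b,
						   unsigned int shift)
{
	/* An arch-tuned implementation (e.g. inline asm) would go here;
	 * the generic 128-bit expression stands in for it.
	 */
	return (u64)((((unsigned __int128)a * mul) + b) >> shift);
}
#define mul_u64_u32_add_u64_shr mul_u64_u32_add_u64_shr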

include/vdso/math64.h

Lines changed: 38 additions & 0 deletions
@@ -21,4 +21,42 @@ __iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
 	return ret;
 }
 
+#if defined(CONFIG_ARCH_SUPPORTS_INT128) && defined(__SIZEOF_INT128__)
+
+#ifndef mul_u64_u32_add_u64_shr
+static __always_inline u64 mul_u64_u32_add_u64_shr(u64 a, u32 mul, u64 b, unsigned int shift)
+{
+	return (u64)((((unsigned __int128)a * mul) + b) >> shift);
+}
+#endif /* mul_u64_u32_add_u64_shr */
+
+#else
+
+#ifndef mul_u64_u32_add_u64_shr
+#ifndef mul_u32_u32
+static inline u64 mul_u32_u32(u32 a, u32 b)
+{
+	return (u64)a * b;
+}
+#define mul_u32_u32 mul_u32_u32
+#endif
+static __always_inline u64 mul_u64_u32_add_u64_shr(u64 a, u32 mul, u64 b, unsigned int shift)
+{
+	u32 ah = a >> 32, al = a;
+	bool ovf;
+	u64 ret;
+
+	ovf = __builtin_add_overflow(mul_u32_u32(al, mul), b, &ret);
+	ret >>= shift;
+	if (ovf && shift)
+		ret += 1ULL << (64 - shift);
+	if (ah)
+		ret += mul_u32_u32(ah, mul) << (32 - shift);
+
+	return ret;
+}
+#endif /* mul_u64_u32_add_u64_shr */
+
+#endif
+
 #endif /* __VDSO_MATH64_H */
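
As a sanity check on the carry handling in the non-INT128 path, the following standalone program (my own test harness, not part of the commit) compares the fallback against a 128-bit reference. It assumes a GCC/Clang host with __int128, and shift <= 32, which the fallback's `<< (32 - shift)` term requires anyway:

/* Build with: cc -O2 -o check check.c */
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

typedef uint64_t u64;
typedef uint32_t u32;

static inline u64 mul_u32_u32(u32 a, u32 b)
{
	return (u64)a * b;
}

/* The fallback from the commit, transplanted to userspace. */
static u64 fallback(u64 a, u32 mul, u64 b, unsigned int shift)
{
	u32 ah = a >> 32, al = a;
	bool ovf;
	u64 ret;

	/* Low 32x32 partial product plus the addend; ovf records the 2^64 carry. */
	ovf = __builtin_add_overflow(mul_u32_u32(al, mul), b, &ret);
	ret >>= shift;
	if (ovf && shift)
		ret += 1ULL << (64 - shift);	/* re-inject the shifted-out carry */
	if (ah)
		ret += mul_u32_u32(ah, mul) << (32 - shift);	/* high partial product */

	return ret;
}

int main(void)
{
	const struct { u64 a; u32 mul; u64 b; unsigned int shift; } t[] = {
		{ 0xdeadbeefcafef00dULL, 0x12345678U, ~0ULL, 24 },	/* forces the carry */
		{ ~0ULL, ~0U, ~0ULL, 32 },				/* all-ones extremes */
		{ 0x123456789abcdef0ULL, 7U, 42U, 1 },
		{ 1ULL, 1U, 0ULL, 0 },					/* shift == 0 path */
	};

	for (unsigned int i = 0; i < sizeof(t) / sizeof(t[0]); i++) {
		u64 ref = (u64)(((unsigned __int128)t[i].a * t[i].mul + t[i].b)
				>> t[i].shift);
		u64 got = fallback(t[i].a, t[i].mul, t[i].b, t[i].shift);

		printf("case %u: ref=%016llx got=%016llx %s\n", i,
		       (unsigned long long)ref, (unsigned long long)got,
		       ref == got ? "OK" : "MISMATCH");
	}
	return 0;
}

The `ovf && shift` test also covers the shift == 0 case: with no shift the result is taken modulo 2^64 anyway, so the carry is correctly dropped rather than shifted by an undefined 64 bits.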
