Commit 8345228c authored by Christophe Leroy, committed by Thomas Gleixner

lib/vdso: Allow architectures to override the ns shift operation

On powerpc/32, GCC (8.1) generates pretty bad code for the ns >>= vd->shift
operation taking into account that the shift is always <= 32 and the upper
part of the result is likely to be zero. GCC makes reversed assumptions,
considering the shift to be likely >= 32 and the upper part to be likely
not zero.

unsigned long long shift(unsigned long long x, unsigned char s)
{
	return x >> s;
}

results in:

00000018 <shift>:
  18:	35 25 ff e0 	addic.  r9,r5,-32
  1c:	41 80 00 10 	blt     2c <shift+0x14>
  20:	7c 64 4c 30 	srw     r4,r3,r9
  24:	38 60 00 00 	li      r3,0
  28:	4e 80 00 20 	blr
  2c:	54 69 08 3c 	rlwinm  r9,r3,1,0,30
  30:	21 45 00 1f 	subfic  r10,r5,31
  34:	7c 84 2c 30 	srw     r4,r4,r5
  38:	7d 29 50 30 	slw     r9,r9,r10
  3c:	7c 63 2c 30 	srw     r3,r3,r5
  40:	7d 24 23 78 	or      r4,r9,r4
  44:	4e 80 00 20 	blr

Even when forcing the shift to be smaller than 32 with an &= 31, it still
considers the shift as likely >= 32.

Move the default shift implementation into an inline which can be redefined
in architecture code via a macro.

[ tglx: Made the shift argument u32 and removed the __arch prefix ]
Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Vincenzo Frascino <vincenzo.frascino@arm.com>
Reviewed-by: Vincenzo Frascino <vincenzo.frascino@arm.com>
Link: https://lore.kernel.org/r/b3d449de856982ed060a71e6ace8eeca4654e685.1580399657.git.christophe.leroy@c-s.fr
Link: https://lkml.kernel.org/r/20200207124403.857649978@linutronix.de

parent ae12e085
@@ -39,6 +39,13 @@ u64 vdso_calc_delta(u64 cycles, u64 last, u64 mask, u32 mult)
 }
 #endif
#ifndef vdso_shift_ns
/*
 * Default implementation of the nanoseconds shift (ns >> vd->shift).
 * Architectures can override it by defining a vdso_shift_ns macro, e.g.
 * when the compiler generates poor code for a 64-bit right shift on a
 * 32-bit target (see the commit message: on powerpc/32 the shift count
 * is always <= 32, which GCC cannot exploit here).
 */
static __always_inline u64 vdso_shift_ns(u64 ns, u32 shift)
{
	return ns >> shift;
}
#endif
 #ifndef __arch_vdso_hres_capable
 static inline bool __arch_vdso_hres_capable(void)
 {
@@ -80,7 +87,7 @@ static int do_hres_timens(const struct vdso_data *vdns, clockid_t clk,
 		ns = vdso_ts->nsec;
 		last = vd->cycle_last;
 		ns += vdso_calc_delta(cycles, last, vd->mask, vd->mult);
-		ns >>= vd->shift;
+		ns = vdso_shift_ns(ns, vd->shift);
 		sec = vdso_ts->sec;
 	} while (unlikely(vdso_read_retry(vd, seq)));
@@ -148,7 +155,7 @@ static __always_inline int do_hres(const struct vdso_data *vd, clockid_t clk,
 		ns = vdso_ts->nsec;
 		last = vd->cycle_last;
 		ns += vdso_calc_delta(cycles, last, vd->mask, vd->mult);
-		ns >>= vd->shift;
+		ns = vdso_shift_ns(ns, vd->shift);
 		sec = vdso_ts->sec;
 	} while (unlikely(vdso_read_retry(vd, seq)));
...
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment