Commit 3bbfafb7 authored by Peter Zijlstra, committed by Ingo Molnar

x86, tsc, locking/static_keys: Employ static_branch_likely()

Because of the old static_key restrictions we had to take an unconditional
jump for the most likely case, causing instruction cache ($I) bloat.

Rewrite to use the new primitives.
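
As a rough illustration only, here is a minimal userspace sketch of the
intended branch layout, assuming a plain bool and __builtin_expect() as
stand-ins for the kernel's self-patching static key; the names below
(use_fast_path, read_counter, sched_clock_sketch) are illustrative and
are not the kernel API:

  #include <stdbool.h>
  #include <stdint.h>
  #include <stdio.h>

  static bool use_fast_path;      /* stand-in for DEFINE_STATIC_KEY_FALSE(__use_tsc) */

  static uint64_t read_counter(void)
  {
          static uint64_t ticks;
          return ++ticks;         /* placeholder for rdtsc() */
  }

  static uint64_t sched_clock_sketch(void)
  {
          /*
           * Test the expected-true condition first: with the real
           * static_branch_likely() the enabled case becomes a
           * fall-through and the fallback is moved out of line.
           */
          if (__builtin_expect(use_fast_path, 1))
                  return read_counter() * 3;      /* stands in for cycles_2_ns() */

          /* Rare fallback, analogous to the jiffies-based path. */
          return 0;
  }

  int main(void)
  {
          printf("disabled: %llu ns\n", (unsigned long long)sched_clock_sketch());
          use_fast_path = true;   /* analogous to static_branch_enable(&__use_tsc) */
          printf("enabled:  %llu ns\n", (unsigned long long)sched_clock_sketch());
          return 0;
  }

With the real primitives the test compiles down to a single patchable
jump/NOP at the branch site: once static_branch_enable() flips the key,
the hot path falls straight through with no conditional jump at all.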
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 1987c947
@@ -38,7 +38,7 @@ static int __read_mostly tsc_unstable;
    erroneous rdtsc usage on !cpu_has_tsc processors */
 static int __read_mostly tsc_disabled = -1;
 
-static struct static_key __use_tsc = STATIC_KEY_INIT;
+static DEFINE_STATIC_KEY_FALSE(__use_tsc);
 
 int tsc_clocksource_reliable;
@@ -274,7 +274,12 @@ static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu)
  */
 u64 native_sched_clock(void)
 {
-        u64 tsc_now;
+        if (static_branch_likely(&__use_tsc)) {
+                u64 tsc_now = rdtsc();
+
+                /* return the value in ns */
+                return cycles_2_ns(tsc_now);
+        }
 
         /*
          * Fall back to jiffies if there's no TSC available:
@@ -284,16 +289,9 @@ u64 native_sched_clock(void)
          *   very important for it to be as fast as the platform
          *   can achieve it. )
          */
-        if (!static_key_false(&__use_tsc)) {
-                /* No locking but a rare wrong value is not a big deal: */
-                return (jiffies_64 - INITIAL_JIFFIES) * (1000000000 / HZ);
-        }
-
-        /* read the Time Stamp Counter: */
-        tsc_now = rdtsc();
-
-        /* return the value in ns */
-        return cycles_2_ns(tsc_now);
+        /* No locking but a rare wrong value is not a big deal: */
+        return (jiffies_64 - INITIAL_JIFFIES) * (1000000000 / HZ);
 }
 
 /* We need to define a real function for sched_clock, to override the
@@ -1204,7 +1202,7 @@ void __init tsc_init(void)
 
         /* now allow native_sched_clock() to use rdtsc */
         tsc_disabled = 0;
-        static_key_slow_inc(&__use_tsc);
+        static_branch_enable(&__use_tsc);
 
         if (!no_sched_irq_time)
                 enable_sched_clock_irqtime();