Commit 776f2291 authored by Peter Zijlstra, committed by Ingo Molnar

sched/clock: Make local_clock() noinstr

With sched_clock() noinstr, provide a noinstr implementation of
local_clock().
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Link: https://lore.kernel.org/r/20230126151323.760767043@infradead.org
parent 8739c681
...@@ -45,7 +45,7 @@ static inline u64 cpu_clock(int cpu) ...@@ -45,7 +45,7 @@ static inline u64 cpu_clock(int cpu)
return sched_clock(); return sched_clock();
} }
static inline u64 local_clock(void) static __always_inline u64 local_clock(void)
{ {
return sched_clock(); return sched_clock();
} }
...@@ -79,10 +79,8 @@ static inline u64 cpu_clock(int cpu) ...@@ -79,10 +79,8 @@ static inline u64 cpu_clock(int cpu)
return sched_clock_cpu(cpu); return sched_clock_cpu(cpu);
} }
static inline u64 local_clock(void) extern u64 local_clock(void);
{
return sched_clock_cpu(raw_smp_processor_id());
}
#endif #endif
#ifdef CONFIG_IRQ_TIME_ACCOUNTING #ifdef CONFIG_IRQ_TIME_ACCOUNTING
......
...@@ -93,7 +93,7 @@ struct sched_clock_data { ...@@ -93,7 +93,7 @@ struct sched_clock_data {
static DEFINE_PER_CPU_SHARED_ALIGNED(struct sched_clock_data, sched_clock_data); static DEFINE_PER_CPU_SHARED_ALIGNED(struct sched_clock_data, sched_clock_data);
notrace static inline struct sched_clock_data *this_scd(void) static __always_inline struct sched_clock_data *this_scd(void)
{ {
return this_cpu_ptr(&sched_clock_data); return this_cpu_ptr(&sched_clock_data);
} }
...@@ -244,12 +244,12 @@ late_initcall(sched_clock_init_late); ...@@ -244,12 +244,12 @@ late_initcall(sched_clock_init_late);
* min, max except they take wrapping into account * min, max except they take wrapping into account
*/ */
notrace static inline u64 wrap_min(u64 x, u64 y) static __always_inline u64 wrap_min(u64 x, u64 y)
{ {
return (s64)(x - y) < 0 ? x : y; return (s64)(x - y) < 0 ? x : y;
} }
notrace static inline u64 wrap_max(u64 x, u64 y) static __always_inline u64 wrap_max(u64 x, u64 y)
{ {
return (s64)(x - y) > 0 ? x : y; return (s64)(x - y) > 0 ? x : y;
} }
...@@ -260,7 +260,7 @@ notrace static inline u64 wrap_max(u64 x, u64 y) ...@@ -260,7 +260,7 @@ notrace static inline u64 wrap_max(u64 x, u64 y)
* - filter out backward motion * - filter out backward motion
* - use the GTOD tick value to create a window to filter crazy TSC values * - use the GTOD tick value to create a window to filter crazy TSC values
*/ */
notrace static u64 sched_clock_local(struct sched_clock_data *scd) static __always_inline u64 sched_clock_local(struct sched_clock_data *scd)
{ {
u64 now, clock, old_clock, min_clock, max_clock, gtod; u64 now, clock, old_clock, min_clock, max_clock, gtod;
s64 delta; s64 delta;
...@@ -287,13 +287,28 @@ notrace static u64 sched_clock_local(struct sched_clock_data *scd) ...@@ -287,13 +287,28 @@ notrace static u64 sched_clock_local(struct sched_clock_data *scd)
clock = wrap_max(clock, min_clock); clock = wrap_max(clock, min_clock);
clock = wrap_min(clock, max_clock); clock = wrap_min(clock, max_clock);
if (!try_cmpxchg64(&scd->clock, &old_clock, clock)) if (!arch_try_cmpxchg64(&scd->clock, &old_clock, clock))
goto again; goto again;
return clock; return clock;
} }
notrace static u64 sched_clock_remote(struct sched_clock_data *scd) noinstr u64 local_clock(void)
{
u64 clock;
if (static_branch_likely(&__sched_clock_stable))
return sched_clock() + __sched_clock_offset;
preempt_disable_notrace();
clock = sched_clock_local(this_scd());
preempt_enable_notrace();
return clock;
}
EXPORT_SYMBOL_GPL(local_clock);
static notrace u64 sched_clock_remote(struct sched_clock_data *scd)
{ {
struct sched_clock_data *my_scd = this_scd(); struct sched_clock_data *my_scd = this_scd();
u64 this_clock, remote_clock; u64 this_clock, remote_clock;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment