Commit 8325d9c0 authored by Peter Zijlstra, committed by Ingo Molnar

sched_clock: cleanups

- remove superfluous checks in __update_sched_clock()
- skip sched_clock_tick() for sched_clock_stable
- reinstate the simple !HAVE_UNSTABLE_SCHED_CLOCK code to please the bloatwatch
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 83ce4009
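The combined effect on sched_clock_tick(), reconstructed from the hunks below: the sched_clock_stable early exit is new, and this_scd() now runs only after the early returns, so the stable path touches no per-cpu state. The tail of the function is unchanged and elided here, as in the hunk.

void sched_clock_tick(void)
{
	struct sched_clock_data *scd;
	u64 now, now_gtod;

	if (sched_clock_stable)		/* new: a stable clock needs no per-cpu correction */
		return;

	if (unlikely(!sched_clock_running))
		return;

	WARN_ON_ONCE(!irqs_disabled());

	scd = this_scd();		/* moved below the early returns */
	now_gtod = ktime_to_ns(ktime_get());
	now = sched_clock();

	/* ... remainder unchanged, not shown in the hunk ... */
}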
@@ -44,9 +44,6 @@ static __read_mostly int sched_clock_running;

 #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
 __read_mostly int sched_clock_stable;
-#else
-static const int sched_clock_stable = 1;
-#endif

 struct sched_clock_data {
 	/*
@@ -115,14 +112,9 @@ static u64 __update_sched_clock(struct sched_clock_data *scd, u64 now)
 	s64 delta = now - scd->tick_raw;
 	u64 clock, min_clock, max_clock;

-	WARN_ON_ONCE(!irqs_disabled());
-
 	if (unlikely(delta < 0))
 		delta = 0;

-	if (unlikely(!sched_clock_running))
-		return 0ull;
-
 	/*
 	 * scd->clock = clamp(scd->tick_gtod + delta,
 	 *		      max(scd->tick_gtod, scd->clock),
@@ -201,18 +193,20 @@ u64 sched_clock_cpu(int cpu)
 	return clock;
 }

-#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
-
 void sched_clock_tick(void)
 {
-	struct sched_clock_data *scd = this_scd();
+	struct sched_clock_data *scd;
 	u64 now, now_gtod;

+	if (sched_clock_stable)
+		return;
+
 	if (unlikely(!sched_clock_running))
 		return;

 	WARN_ON_ONCE(!irqs_disabled());

+	scd = this_scd();
 	now_gtod = ktime_to_ns(ktime_get());
 	now = sched_clock();

@@ -245,6 +239,21 @@ void sched_clock_idle_wakeup_event(u64 delta_ns)
 }
 EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event);

+#else /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
+
+void sched_clock_init(void)
+{
+	sched_clock_running = 1;
+}
+
+u64 sched_clock_cpu(int cpu)
+{
+	if (unlikely(!sched_clock_running))
+		return 0;
+
+	return sched_clock();
+}
+
 #endif /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */

 unsigned long long cpu_clock(int cpu)
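The clamp() comment in the second hunk states the invariant __update_sched_clock() maintains: the per-cpu clock may never go backwards, and may never run ahead of the GTOD timestamp taken at the last tick by more than one tick. A minimal standalone sketch of that computation follows; the upper bound scd->tick_gtod + TICK_NSEC comes from the full comment in the file, which the hunk truncates, so treat it as an assumption here.

typedef unsigned long long u64;

/* Sketch only, not the kernel code: clamp a proposed clock value. */
static u64 clamp_sched_clock(u64 tick_gtod, u64 prev_clock, u64 delta,
			     u64 tick_nsec)
{
	u64 clock     = tick_gtod + delta;	/* proposed new value */
	u64 min_clock = prev_clock > tick_gtod ? prev_clock : tick_gtod;
	u64 max_clock = tick_gtod + tick_nsec;	/* assumed: at most one tick ahead */

	if (clock < min_clock)			/* never go backwards */
		clock = min_clock;
	if (clock > max_clock)			/* never outrun gtod by more than a tick */
		clock = max_clock;

	return clock;
}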