Commit c1955a3d authored by Peter Zijlstra, committed by Ingo Molnar

sched_clock: delay using sched_clock()

Some architectures can't handle sched_clock() being called too early; delay
using it until sched_clock_init() has been called.
Reported-by: Bill Gatliff <bgat@billgatliff.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Tested-by: Nishanth Aravamudan <nacc@us.ibm.com>
CC: Russell King - ARM Linux <linux@arm.linux.org.uk>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 4a273f20
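What the patch does, in short: the !CONFIG_HAVE_UNSTABLE_SCHED_CLOCK inline stubs in sched.h, which forwarded straight to sched_clock(), are replaced by out-of-line versions that return 0 until sched_clock_init() has run. Below is a minimal standalone sketch of that guard pattern; the jiffies stand-in and the main() driver are illustrative assumptions for user-space compilation, while the sched_clock_init()/sched_clock_cpu() bodies mirror the fallback added by this patch:

/*
 * Standalone sketch of the guard pattern for the
 * !CONFIG_HAVE_UNSTABLE_SCHED_CLOCK case. The jiffies variable and
 * main() are stand-ins for kernel context, not kernel code.
 */
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL
#define HZ 100

static unsigned long long jiffies;	/* stand-in for the kernel's jiffies */
static int sched_clock_running;		/* set once the clock is safe to use */

static unsigned long long sched_clock(void)
{
	/* mirrors the weak jiffies-based fallback in kernel/sched_clock.c */
	return jiffies * (NSEC_PER_SEC / HZ);
}

static void sched_clock_init(void)
{
	sched_clock_running = 1;
}

static unsigned long long sched_clock_cpu(int cpu)
{
	(void)cpu;
	/* before init, report 0 instead of calling sched_clock() too early */
	if (!sched_clock_running)
		return 0;
	return sched_clock();
}

int main(void)
{
	jiffies = 50;
	printf("before init: %llu\n", sched_clock_cpu(0));	/* prints 0 */
	sched_clock_init();
	printf("after init:  %llu\n", sched_clock_cpu(0));	/* prints 500000000 */
	return 0;
}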
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1551,16 +1551,10 @@ static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
 
 extern unsigned long long sched_clock(void);
 
+extern void sched_clock_init(void);
+extern u64 sched_clock_cpu(int cpu);
+
 #ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
-static inline void sched_clock_init(void)
-{
-}
-
-static inline u64 sched_clock_cpu(int cpu)
-{
-	return sched_clock();
-}
-
 static inline void sched_clock_tick(void)
 {
 }
@@ -1573,8 +1567,6 @@ static inline void sched_clock_idle_wakeup_event(u64 delta_ns)
 {
 }
 #else
-extern void sched_clock_init(void);
-extern u64 sched_clock_cpu(int cpu);
 extern void sched_clock_tick(void);
 extern void sched_clock_idle_sleep_event(void);
 extern void sched_clock_idle_wakeup_event(u64 delta_ns);
--- a/kernel/sched_clock.c
+++ b/kernel/sched_clock.c
@@ -42,6 +42,8 @@ unsigned long long __attribute__((weak)) sched_clock(void)
 	return (unsigned long long)jiffies * (NSEC_PER_SEC / HZ);
 }
 
+static __read_mostly int sched_clock_running;
+
 #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
 
 struct sched_clock_data {
@@ -70,8 +72,6 @@ static inline struct sched_clock_data *cpu_sdc(int cpu)
 	return &per_cpu(sched_clock_data, cpu);
 }
 
-static __read_mostly int sched_clock_running;
-
 void sched_clock_init(void)
 {
 	u64 ktime_now = ktime_to_ns(ktime_get());
@@ -248,6 +248,21 @@ void sched_clock_idle_wakeup_event(u64 delta_ns)
 }
 EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event);
 
+#else /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
+
+void sched_clock_init(void)
+{
+	sched_clock_running = 1;
+}
+
+u64 sched_clock_cpu(int cpu)
+{
+	if (unlikely(!sched_clock_running))
+		return 0;
+
+	return sched_clock();
+}
+
 #endif
 
 unsigned long long cpu_clock(int cpu)
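Ordering note: callers must tolerate the 0 return until the scheduler's init path has run. A hedged sketch of the expected boot sequence follows; the function name is hypothetical and the placement of sched_clock_init() inside the scheduler's init path is an assumption about kernels of this era, not something shown in this diff:

/* Illustrative boot ordering only (hypothetical name); not part of this patch. */
void boot_sequence_sketch(void)
{
	/* too early: sched_clock_cpu() would return 0 here */
	sched_clock_init();	/* assumed to run from the scheduler's init path */
	/* from here on, sched_clock_cpu() forwards to sched_clock() */
}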