Commit 0d651e4e authored by Mark Rutland's avatar Mark Rutland

clocksource: arch_timer: use virtual counters

Switching between reading the virtual or physical counters is
problematic, as some core code wants a view of time before we're fully
set up. Using a function pointer and switching the source after the
first read can make time appear to go backwards, and having a check in
the read function is an unfortunate block on what we want to be a fast
path.

Instead, this patch makes us always use the virtual counters. If we're a
guest, or don't have hyp mode, we'll use the virtual timers, and as such
don't care about CNTVOFF as long as it doesn't change in such a way as
to make time appear to travel backwards. As the guest will use the
virtual timers, a (potential) KVM host must use the physical timers
(which can wake up the host even if they fire while a guest is
executing), and hence a host must have CNTVOFF set to zero so as to have
a consistent view of time between the physical timers and virtual
counters.
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Acked-by: Marc Zyngier <marc.zyngier@arm.com>
Acked-by: Santosh Shilimkar <santosh.shilimkar@ti.com>
Cc: Rob Herring <rob.herring@calxeda.com>
parent f793c23e
...@@ -80,15 +80,6 @@ static inline u32 arch_timer_get_cntfrq(void) ...@@ -80,15 +80,6 @@ static inline u32 arch_timer_get_cntfrq(void)
return val; return val;
} }
/*
 * Read the 64-bit physical counter (CNTPCT) on 32-bit ARM.
 *
 * The mrrc on coprocessor p15, c14 transfers the counter's low and
 * high words into the %Q0/%R0 halves of the 64-bit output operand.
 * The isb() ensures prior instructions complete before the counter is
 * sampled, so the read is not speculated early.
 *
 * NOTE(review): removed by this commit — callers now use the virtual
 * counter (CNTVCT) exclusively; see the commit message above.
 */
static inline u64 arch_counter_get_cntpct(void)
{
u64 cval;
isb();
asm volatile("mrrc p15, 0, %Q0, %R0, c14" : "=r" (cval));
return cval;
}
static inline u64 arch_counter_get_cntvct(void) static inline u64 arch_counter_get_cntvct(void)
{ {
u64 cval; u64 cval;
......
...@@ -110,16 +110,6 @@ static inline void __cpuinit arch_counter_set_user_access(void) ...@@ -110,16 +110,6 @@ static inline void __cpuinit arch_counter_set_user_access(void)
asm volatile("msr cntkctl_el1, %0" : : "r" (cntkctl)); asm volatile("msr cntkctl_el1, %0" : : "r" (cntkctl));
} }
/*
 * Read the 64-bit physical counter register (CNTPCT_EL0) on arm64.
 *
 * The isb() acts as a context-synchronization barrier so the mrs is
 * not speculated ahead of earlier instructions.
 *
 * NOTE(review): removed by this commit — timekeeping now reads the
 * virtual counter (CNTVCT_EL0) unconditionally; see the commit
 * message above.
 */
static inline u64 arch_counter_get_cntpct(void)
{
u64 cval;
isb();
asm volatile("mrs %0, cntpct_el0" : "=r" (cval));
return cval;
}
static inline u64 arch_counter_get_cntvct(void) static inline u64 arch_counter_get_cntvct(void)
{ {
u64 cval; u64 cval;
......
...@@ -186,27 +186,19 @@ u32 arch_timer_get_rate(void) ...@@ -186,27 +186,19 @@ u32 arch_timer_get_rate(void)
return arch_timer_rate; return arch_timer_rate;
} }
/* u64 arch_timer_read_counter(void)
* Some external users of arch_timer_read_counter (e.g. sched_clock) may try to
* call it before it has been initialised. Rather than incur a performance
* penalty checking for initialisation, provide a default implementation that
* won't lead to time appearing to jump backwards.
*/
static u64 arch_timer_read_zero(void)
{ {
return 0; return arch_counter_get_cntvct();
} }
u64 (*arch_timer_read_counter)(void) = arch_timer_read_zero;
static cycle_t arch_counter_read(struct clocksource *cs) static cycle_t arch_counter_read(struct clocksource *cs)
{ {
return arch_timer_read_counter(); return arch_counter_get_cntvct();
} }
static cycle_t arch_counter_read_cc(const struct cyclecounter *cc) static cycle_t arch_counter_read_cc(const struct cyclecounter *cc)
{ {
return arch_timer_read_counter(); return arch_counter_get_cntvct();
} }
static struct clocksource clocksource_counter = { static struct clocksource clocksource_counter = {
...@@ -287,7 +279,7 @@ static int __init arch_timer_register(void) ...@@ -287,7 +279,7 @@ static int __init arch_timer_register(void)
cyclecounter.mult = clocksource_counter.mult; cyclecounter.mult = clocksource_counter.mult;
cyclecounter.shift = clocksource_counter.shift; cyclecounter.shift = clocksource_counter.shift;
timecounter_init(&timecounter, &cyclecounter, timecounter_init(&timecounter, &cyclecounter,
arch_counter_get_cntpct()); arch_counter_get_cntvct());
if (arch_timer_use_virtual) { if (arch_timer_use_virtual) {
ppi = arch_timer_ppi[VIRT_PPI]; ppi = arch_timer_ppi[VIRT_PPI];
...@@ -376,11 +368,6 @@ static void __init arch_timer_init(struct device_node *np) ...@@ -376,11 +368,6 @@ static void __init arch_timer_init(struct device_node *np)
} }
} }
if (arch_timer_use_virtual)
arch_timer_read_counter = arch_counter_get_cntvct;
else
arch_timer_read_counter = arch_counter_get_cntpct;
arch_timer_register(); arch_timer_register();
arch_timer_arch_init(); arch_timer_arch_init();
} }
......
...@@ -32,7 +32,7 @@ ...@@ -32,7 +32,7 @@
#ifdef CONFIG_ARM_ARCH_TIMER #ifdef CONFIG_ARM_ARCH_TIMER
extern u32 arch_timer_get_rate(void); extern u32 arch_timer_get_rate(void);
extern u64 (*arch_timer_read_counter)(void); extern u64 arch_timer_read_counter(void);
extern struct timecounter *arch_timer_get_timecounter(void); extern struct timecounter *arch_timer_get_timecounter(void);
#else #else
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment