Commit 411f790c authored by Stefani Seibold, committed by H. Peter Anvin

x86, vdso: Revamp vclock_gettime.c

This intermediate patch revamps vclock_gettime.c by moving some functions
around. It exists only for splitting purposes, to make the whole 32-bit vdso
timer patch easier to review.
Reviewed-by: Andy Lutomirski <luto@amacapital.net>
Signed-off-by: Stefani Seibold <stefani@seibold.net>
Link: http://lkml.kernel.org/r/1395094933-14252-4-git-send-email-stefani@seibold.net
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
parent 3935ed6a
@@ -26,41 +26,26 @@
 
 #define gtod (&VVAR(vsyscall_gtod_data))
 
-notrace static cycle_t vread_tsc(void)
-{
-	cycle_t ret;
-	u64 last;
-
-	/*
-	 * Empirically, a fence (of type that depends on the CPU)
-	 * before rdtsc is enough to ensure that rdtsc is ordered
-	 * with respect to loads. The various CPU manuals are unclear
-	 * as to whether rdtsc can be reordered with later loads,
-	 * but no one has ever seen it happen.
-	 */
-	rdtsc_barrier();
-	ret = (cycle_t)vget_cycles();
-
-	last = VVAR(vsyscall_gtod_data).clock.cycle_last;
-
-	if (likely(ret >= last))
-		return ret;
-
-	/*
-	 * GCC likes to generate cmov here, but this branch is extremely
-	 * predictable (it's just a funciton of time and the likely is
-	 * very likely) and there's a data dependence, so force GCC
-	 * to generate a branch instead. I don't barrier() because
-	 * we don't actually need a barrier, and if this function
-	 * ever gets inlined it will generate worse code.
-	 */
-	asm volatile ("");
-	return last;
-}
-
 static notrace cycle_t vread_hpet(void)
 {
 	return readl((const void __iomem *)fix_to_virt(VSYSCALL_HPET) + HPET_COUNTER);
 }
 
+notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
+{
+	long ret;
+	asm("syscall" : "=a" (ret) :
+	    "0" (__NR_clock_gettime), "D" (clock), "S" (ts) : "memory");
+	return ret;
+}
+
+notrace static long vdso_fallback_gtod(struct timeval *tv, struct timezone *tz)
+{
+	long ret;
+
+	asm("syscall" : "=a" (ret) :
+	    "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
+	return ret;
+}
+
 #ifdef CONFIG_PARAVIRT_CLOCK
@@ -133,23 +118,37 @@ static notrace cycle_t vread_pvclock(int *mode)
 }
 #endif
 
-notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
-{
-	long ret;
-	asm("syscall" : "=a" (ret) :
-	    "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "memory");
-	return ret;
-}
-
-notrace static long vdso_fallback_gtod(struct timeval *tv, struct timezone *tz)
-{
-	long ret;
-
-	asm("syscall" : "=a" (ret) :
-	    "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
-	return ret;
-}
+notrace static cycle_t vread_tsc(void)
+{
+	cycle_t ret;
+	u64 last;
+
+	/*
+	 * Empirically, a fence (of type that depends on the CPU)
+	 * before rdtsc is enough to ensure that rdtsc is ordered
+	 * with respect to loads. The various CPU manuals are unclear
+	 * as to whether rdtsc can be reordered with later loads,
+	 * but no one has ever seen it happen.
+	 */
+	rdtsc_barrier();
+	ret = (cycle_t)vget_cycles();
+
+	last = VVAR(vsyscall_gtod_data).clock.cycle_last;
+
+	if (likely(ret >= last))
+		return ret;
+
+	/*
+	 * GCC likes to generate cmov here, but this branch is extremely
+	 * predictable (it's just a funciton of time and the likely is
+	 * very likely) and there's a data dependence, so force GCC
+	 * to generate a branch instead. I don't barrier() because
+	 * we don't actually need a barrier, and if this function
+	 * ever gets inlined it will generate worse code.
+	 */
+	asm volatile ("");
+	return last;
+}
 
 notrace static inline u64 vgetsns(int *mode)
 {
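
For context (not part of the patch): the vdso_fallback_gettime/vdso_fallback_gtod paths moved above are the real-syscall fallbacks the vDSO uses when no vDSO-capable clocksource is available, while vread_tsc/vread_hpet serve the fast path. From userspace the whole mechanism is exercised simply by calling clock_gettime() and gettimeofday(). A minimal sketch, assuming glibc on x86_64 dispatches these calls through the vDSO as usual:

/* Sketch only, not part of the patch: exercises the vDSO code shown above.
 * On x86_64, glibc normally routes these calls to __vdso_clock_gettime /
 * __vdso_gettimeofday; if the current clocksource is not vDSO-capable, the
 * vDSO falls back to the real syscalls (vdso_fallback_gettime/_gtod).
 */
#include <stdio.h>
#include <time.h>
#include <sys/time.h>

int main(void)
{
	struct timespec ts;
	struct timeval tv;

	if (clock_gettime(CLOCK_MONOTONIC, &ts) == 0)
		printf("CLOCK_MONOTONIC: %ld.%09ld\n", (long)ts.tv_sec, ts.tv_nsec);

	if (gettimeofday(&tv, NULL) == 0)
		printf("gettimeofday:    %ld.%06ld\n", (long)tv.tv_sec, (long)tv.tv_usec);

	return 0;
}

(On glibc older than 2.17, link with -lrt for clock_gettime.)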