Commit 1c41006f authored by Andi Kleen, committed by Linus Torvalds

[PATCH] Time changes for x86-64

Some timer updates from Vojtech Pavlik for x86-64.  In theory this supports
HPET timing now, but the support is disabled.

It would actually need vxtime_lock() macros in the generic timer code to
protect xtime updates, but I'm leaving that out for now because it's only
needed for vsyscalls, and they're currently disabled.
parent 15aed72d
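For context, the vxtime_lock()/vxtime_unlock() macros this message refers to (defined at the end of this diff) implement a lock-free sequence-count protocol: the writer bumps one counter before and another after an update, and readers retry until both counters match. Below is a minimal user-space sketch of the same idea, using illustrative names (seq, shared_value, writer_update, reader_read) that are not from the kernel:

static volatile long seq[2];		/* seq[0]: writer entries, seq[1]: writer exits */
static volatile long shared_value;	/* stands in for the vsyscall time variables */

static void writer_update(long v)	/* kernel side, cf. vxtime_lock()/vxtime_unlock() */
{
	seq[0]++;			/* announce an update in progress */
	/* the kernel issues wmb() here */
	shared_value = v;
	/* ... and wmb() here */
	seq[1]++;			/* announce the update is complete */
}

static long reader_read(void)		/* user side, cf. do_vgettimeofday() below */
{
	long s, v;

	do {
		s = seq[1];		/* snapshot the exit counter */
		/* the reader issues rmb() here */
		v = shared_value;
		/* ... and rmb() here */
	} while (s != seq[0]);		/* a writer entered meanwhile: retry */

	return v;
}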
/*
 *  linux/arch/x86-64/kernel/time.c
 *
 *  "High Precision Event Timer" based timekeeping.
 *
 *  Copyright (c) 1991,1992,1995  Linus Torvalds
 *  Copyright (c) 1994  Alan Modra
 *  Copyright (c) 1995  Markus Kuhn
 *  Copyright (c) 1996  Ingo Molnar
 *  Copyright (c) 1998  Andrea Arcangeli
 *  Copyright (c) 2002  Vojtech Pavlik
 *
 * This file contains the PC-specific time handling details:
 * reading the RTC at bootup, etc..
 * 1994-07-02	Alan Modra
 *	fixed set_rtc_mmss, fixed time.year for >= 2000, new mktime
 * 1995-03-26	Markus Kuhn
 *	fixed 500 ms bug at call to set_rtc_mmss, fixed DS12887
 *	precision CMOS clock update
 * 1996-05-03	Ingo Molnar
 *	fixed time warps in do_[slow|fast]_gettimeoffset()
 * 1997-09-10	Updated NTP code according to technical memorandum Jan '96
 *	"A Kernel Model for Precision Timekeeping" by Dave Mills
 * 1998-09-05	(Various)
 *	More robust do_fast_gettimeoffset() algorithm implemented
 *	(works with APM, Cyrix 6x86MX and Centaur C6),
 *	monotonic gettimeofday() with fast_get_timeoffset(),
 *	drift-proof precision TSC calibration on boot
 *	(C. Scott Ananian <cananian@alumni.princeton.edu>, Andrew D.
 *	Balsa <andrebalsa@altern.org>, Philip Gladstone <philip@raptor.com>;
 *	ported from 2.0.35 Jumbo-9 by Michael Krause <m.krause@tu-harburg.de>).
 * 1998-12-16	Andrea Arcangeli
 *	Fixed Jumbo-9 code in 2.1.131: do_gettimeofday was missing 1 jiffy
 *	because it was not accounting for lost_ticks.
 * 1998-12-24	Copyright (C) 1998  Andrea Arcangeli
 *	Fixed an xtime SMP race (we need the xtime_lock rw spinlock to
 *	serialize accesses to xtime/lost_ticks).
 */
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/time.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/device.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/irq.h>
#include <asm/msr.h>
#include <asm/delay.h>
#include <asm/mpspec.h>
#include <asm/uaccess.h>
#include <asm/processor.h>
#include <linux/mc146818rtc.h>
#include <linux/timex.h>
#include <linux/config.h>
#include <asm/fixmap.h>
#include <linux/irq.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <asm/vsyscall.h>
#include <asm/timex.h>
unsigned int cpu_khz;				/* TSC clocks / usec, not used here */
unsigned long hpet_period;			/* fsecs / HPET clock */
unsigned long hpet_tick;			/* HPET clocks / interrupt */
int hpet_report_lost_ticks;			/* command line option */

struct hpet_data __hpet __section_hpet;		/* address, quotient, trigger, hz */

u64 jiffies_64;

extern rwlock_t xtime_lock;

volatile unsigned long __jiffies __section_jiffies;
unsigned long __wall_jiffies __section_wall_jiffies;
struct timespec __xtime __section_xtime;
struct timezone __sys_tz __section_sys_tz;

spinlock_t rtc_lock = SPIN_LOCK_UNLOCKED;

/*
 * do_gettimeoffset() returns microseconds since last timer interrupt was
 * triggered by hardware. A memory read of HPET is slower than a register read
 * of TSC, but much more reliable. It's also synchronized to the timer
 * interrupt. Note that do_gettimeoffset() may return more than hpet_tick, if a
 * timer interrupt has happened already, but hpet.trigger wasn't updated yet.
 * This is not a problem, because jiffies hasn't updated either. They are bound
 * together by xtime_lock.
 */

static spinlock_t time_offset_lock = SPIN_LOCK_UNLOCKED;
static unsigned long timeoffset = 0;	/* our adjusted time offset in microseconds */

inline unsigned int do_gettimeoffset(void)
{
	unsigned long t;

	rdtscll(t);
	return (t - hpet.last_tsc) * (1000000L / HZ) / hpet.ticks + hpet.offset;
}
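/*
 * Worked example of the offset formula above (illustrative numbers, not from
 * the source): with a 500 MHz TSC and HZ = 100, hpet.ticks starts out at
 * cpu_khz * (1000 / HZ) = 5,000,000 TSC clocks per tick. If 2,500,000 clocks
 * have passed since hpet.last_tsc, the interpolated offset is
 * 2500000 * (1000000 / 100) / 5000000 = 5000 usec, i.e. halfway into the
 * 10 ms tick, plus the hpet.offset error correction.
 */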
spinlock_t i8253_lock = SPIN_LOCK_UNLOCKED;
/*
 * This version of gettimeofday() has microsecond resolution and better than
 * microsecond precision, as we're using at least a 10 MHz (usually 14.31818
 * MHz) HPET timer.
 */

void do_gettimeofday(struct timeval *tv)
{
	unsigned long flags, t;
	unsigned int sec, usec;

	read_lock_irqsave(&xtime_lock, flags);
	spin_lock(&time_offset_lock);

	sec = xtime.tv_sec;
	usec = xtime.tv_nsec / 1000;

	t = (jiffies - wall_jiffies) * (1000000L / HZ) + do_gettimeoffset();
	if (t > timeoffset) timeoffset = t;
	usec += timeoffset;

	spin_unlock(&time_offset_lock);
	read_unlock_irqrestore(&xtime_lock, flags);

	tv->tv_sec = sec + usec / 1000000;
	tv->tv_usec = usec % 1000000;
}
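/*
 * The apparent intent of the time_offset_lock/timeoffset pair above: the
 * offset only ratchets upward between ticks, so two back-to-back callers
 * cannot observe time stepping backwards even if the TSC interpolation
 * jitters slightly between calls.
 */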
/*
 * settimeofday() first undoes the correction that gettimeofday would do
 * on the time, and then saves it. This is ugly, but has been like this for
 * ages already.
 */

void do_settimeofday(struct timeval *tv)
{
	write_lock_irq(&xtime_lock);
	vxtime_lock();

	tv->tv_usec -= do_gettimeoffset() +
		(jiffies - wall_jiffies) * tick_usec;

	while (tv->tv_usec < 0) {
		tv->tv_usec += 1000000;
		tv->tv_sec--;
	}

	xtime.tv_sec = tv->tv_sec;
	xtime.tv_nsec = (tv->tv_usec * 1000);

	vxtime_unlock();

	time_adjust = 0;		/* stop active adjtime() */
	time_status |= STA_UNSYNC;
	time_maxerror = NTP_PHASE_LIMIT;
	time_esterror = NTP_PHASE_LIMIT;

	write_unlock_irq(&xtime_lock);
}
/*
 * In order to set the CMOS clock precisely, set_rtc_mmss has to be called 500
 * ms after the second nowtime has started, because when nowtime is written
 * into the registers of the CMOS clock, it will jump to the next second
 * precisely 500 ms later. Check the Motorola MC146818A or Dallas DS12887 data
 * sheet for details.
 */

static void set_rtc_mmss(unsigned long nowtime)
{
	int real_seconds, real_minutes, cmos_minutes;
	unsigned char control, freq_select;

	/* gets recalled with irq locally disabled */
	spin_lock(&rtc_lock);

/*
 * Tell the clock it's being set and stop it.
 */

	control = CMOS_READ(RTC_CONTROL);
	CMOS_WRITE(control | RTC_SET, RTC_CONTROL);

	freq_select = CMOS_READ(RTC_FREQ_SELECT);
	CMOS_WRITE(freq_select | RTC_DIV_RESET2, RTC_FREQ_SELECT);

	cmos_minutes = CMOS_READ(RTC_MINUTES);
	BCD_TO_BIN(cmos_minutes);

/*
 * since we're only adjusting minutes and seconds, don't interfere with hour
 * overflow. This avoids messing with unknown time zones but requires your RTC
 * not to be off by more than 15 minutes. Since we're calling it only when
 * our clock is externally synchronized using NTP, this shouldn't be a problem.
 */

	real_seconds = nowtime % 60;
	real_minutes = nowtime / 60;
	if (((abs(real_minutes - cmos_minutes) + 15) / 30) & 1)
		real_minutes += 30;		/* correct for half hour time zone */
	real_minutes %= 60;

	if (abs(real_minutes - cmos_minutes) < 30) {
		BIN_TO_BCD(real_seconds);
		BIN_TO_BCD(real_minutes);
		CMOS_WRITE(real_seconds, RTC_SECONDS);
		CMOS_WRITE(real_minutes, RTC_MINUTES);
	} else
		printk(KERN_WARNING "time.c: can't update CMOS clock from %d to %d\n",
			cmos_minutes, real_minutes);

/*
 * The following flags have to be released exactly in this order, otherwise the
 * DS12887 (popular MC146818A clone with integrated battery and quartz) will
 * not reset the oscillator and will not update precisely 500 ms later. You
 * won't find this mentioned in the Dallas Semiconductor data sheets, but who
 * believes data sheets anyway ... -- Markus Kuhn
 */

	CMOS_WRITE(control, RTC_CONTROL);
	CMOS_WRITE(freq_select, RTC_FREQ_SELECT);

	spin_unlock(&rtc_lock);
}
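/*
 * Worked example of the half-hour correction above (illustrative numbers):
 * with real_minutes = 10 and the CMOS clock reading cmos_minutes = 45,
 * abs(10 - 45) = 35 and (35 + 15) / 30 = 1 is odd, so 30 is added:
 * real_minutes = 40. Now abs(40 - 45) = 5 < 30 and the update is applied,
 * leaving the RTC on its half-hour-offset local time.
 */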
static void timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	static unsigned long rtc_update = 0;

/*
 * Here we are in the timer irq handler. We have irqs locally disabled (so we
 * don't need spin_lock_irqsave()) but we don't know if the timer_bh is running
 * on the other CPU, so we need a lock. We also need to lock the vsyscall
 * variables, because both do_timer() and us change them -arca+vojtech
 */

	write_lock(&xtime_lock);
	vxtime_lock();

	{
		unsigned long t;

		rdtscll(t);
		hpet.offset = (t - hpet.last_tsc) * (1000000L / HZ) / hpet.ticks + hpet.offset - 1000000L / HZ;
		if (hpet.offset >= 1000000L / HZ)
			hpet.offset = 0;
		hpet.ticks = min_t(long, max_t(long, (t - hpet.last_tsc) * (1000000L / HZ) / (1000000L / HZ - hpet.offset),
				cpu_khz * 1000/HZ * 15 / 16), cpu_khz * 1000/HZ * 16 / 15);
		hpet.last_tsc = t;
	}

/*
 * Do the timer stuff.
 */

	do_timer(regs);

/*
 * In the SMP case we use the local APIC timer interrupt to do the profiling,
 * except when we simulate SMP mode on a uniprocessor system, in that case we
 * have to call the local interrupt handler.
 */

#ifndef CONFIG_X86_LOCAL_APIC
	if (!user_mode(regs))
		x86_do_profile(regs->rip);
#else
	if (!using_apic_timer)
		smp_local_timer_interrupt(regs);
#endif

/*
 * If we have an externally synchronized Linux clock, then update CMOS clock
 * accordingly every ~11 minutes. set_rtc_mmss() will be called in the jiffy
 * closest to exactly 500 ms before the next second. If the update fails, we
 * don't care, as it'll be updated on the next turn, and the problem (time way
 * off) isn't likely to go away much sooner anyway.
 */

	if ((~time_status & STA_UNSYNC) && xtime.tv_sec > rtc_update &&
		abs(xtime.tv_nsec - 500000000) <= tick_nsec / 2) {
		set_rtc_mmss(xtime.tv_sec);
		rtc_update = xtime.tv_sec + 660;
	}

	vxtime_unlock();
	write_unlock(&xtime_lock);
}
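/*
 * Worked example of the hpet.ticks clamp above (illustrative numbers): with
 * cpu_khz = 500000 and HZ = 100 the nominal per-tick TSC count is 5,000,000,
 * so hpet.ticks is kept within [5,000,000 * 15/16, 5,000,000 * 16/15], i.e.
 * roughly [4,687,500, 5,333,333]. A single delayed interrupt can therefore
 * skew the interpolation rate by at most about 6.7% per tick.
 */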
/* not static: needed by APM */
unsigned long get_cmos_time(void)
{
	unsigned int timeout, year, mon, day, hour, min, sec;
	unsigned char last, this;

/*
 * The Linux interpretation of the CMOS clock register contents: When the
 * Update-In-Progress (UIP) flag goes from 1 to 0, the RTC registers show the
 * second which has precisely just started. Waiting for this can take up to 1
 * second, we timeout approximately after 2.4 seconds on a machine with
 * standard 8.3 MHz ISA bus.
 */

	spin_lock(&rtc_lock);

	timeout = 1000000;
	last = this = 0;

	/* wait for the falling edge of the UIP flag, or for the timeout */
	do {
		last = this;
		this = CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP;
		timeout--;
	} while (timeout && !(last && !this));

/*
 * Here we are safe to assume the registers won't change for a whole second, so
 * we just go ahead and read them.
 */

	sec = CMOS_READ(RTC_SECONDS);
	min = CMOS_READ(RTC_MINUTES);
	hour = CMOS_READ(RTC_HOURS);
	day = CMOS_READ(RTC_DAY_OF_MONTH);
	mon = CMOS_READ(RTC_MONTH);
	year = CMOS_READ(RTC_YEAR);

	spin_unlock(&rtc_lock);

/*
 * We know that x86-64 always uses BCD format, no need to check the config
 * register.
 */

	BCD_TO_BIN(sec);
	BCD_TO_BIN(min);
	BCD_TO_BIN(hour);
	BCD_TO_BIN(day);
	BCD_TO_BIN(mon);
	BCD_TO_BIN(year);

/*
 * This will work up to Dec 31, 2069.
 */

	if ((year += 1900) < 1970)
		year += 100;

	return mktime(year, mon, day, hour, min, sec);
}
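/*
 * BCD example: the BCD_TO_BIN macro from <linux/mc146818rtc.h> computes
 * ((val) & 15) + ((val) >> 4) * 10, so a raw RTC_SECONDS reading of 0x59
 * becomes 9 + 5 * 10 = 59 decimal.
 */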
static struct irqaction irq0 = { timer_interrupt, SA_INTERRUPT, 0, "timer", NULL, NULL};
/*
 * pit_calibrate_tsc() uses the speaker output (channel 2) of the PIT. This is
 * better than using the timer interrupt output, because we can read the value
 * of the speaker with just one inb(), where we need three i/o operations for
 * the interrupt channel. We count how many ticks the TSC does in 50 ms.
 */

#define TICK_COUNT 100000000

static unsigned int __init pit_calibrate_tsc(void)
{
	unsigned long start, end;
	unsigned long flags;

	local_irq_save(flags);
	local_irq_disable();

	/* Set the Gate high, disable speaker */
	outb((inb(0x61) & ~0x02) | 0x01, 0x61);

	outb(0xb0, 0x43);				/* binary, mode 0, LSB/MSB, ch 2 */
	outb((1193182 / (1000 / 50)) & 0xff, 0x42);	/* LSB of 50 ms count */
	outb((1193182 / (1000 / 50)) >> 8, 0x42);	/* MSB of 50 ms count */
	rdtscll(start);
	while ((inb(0x61) & 0x20) == 0);
	rdtscll(end);

	local_irq_restore(flags);

	return (end - start) / 50;
}
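/*
 * Calibration arithmetic (illustrative numbers): the PIT runs at 1193182 Hz,
 * so the programmed count 1193182 / (1000 / 50) = 59659 takes 50 ms to reach
 * terminal count. If (end - start) = 25,000,000 TSC cycles elapse in that
 * window, (end - start) / 50 = 500,000 cycles/ms, i.e. cpu_khz = 500000 for
 * a 500 MHz processor.
 */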
void __init pit_init(void)
{
	outb_p(0x34, 0x43);		/* binary, mode 2, LSB/MSB, ch 0 */
	outb_p(LATCH & 0xff, 0x40);	/* LSB */
	outb_p(LATCH >> 8, 0x40);	/* MSB */
}

int __init time_setup(char *str)
{
	hpet_report_lost_ticks = 1;
	return 1;
}

static struct device device_i8253 = {
	name:	"i8253",
	bus_id:	"0040",
};

static int time_init_driverfs(void)
{
	return register_sys_device(&device_i8253);
}

extern void __init config_acpi_tables(void);

void __init time_init(void)
{
	xtime.tv_sec = get_cmos_time();
	xtime.tv_nsec = 0;

	pit_init();
	printk(KERN_INFO "time.c: Using 1.1931816 MHz PIT timer.\n");
	setup_irq(0, &irq0);

	cpu_khz = pit_calibrate_tsc();
	printk(KERN_INFO "time.c: Detected %d.%03d MHz processor.\n",
		cpu_khz / 1000, cpu_khz % 1000);

	hpet.ticks = cpu_khz * (1000 / HZ);
	rdtscll(hpet.last_tsc);
}

__initcall(time_init_driverfs);
__setup("report_lost_ticks", time_setup);
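/*
 * Usage note: the __setup() line above means that booting with
 * "report_lost_ticks" on the kernel command line sets hpet_report_lost_ticks;
 * nothing in the code shown here acts on the flag yet.
 */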
@@ -47,7 +47,7 @@
#define __vsyscall(nr) __attribute__ ((unused,__section__(".vsyscall_" #nr)))
#define NO_VSYSCALL 1
#ifdef NO_VSYSCALL
#include <asm/unistd.h>
@@ -71,51 +71,27 @@ static inline void timeval_normalize(struct timeval * tv)
long __vxtime_sequence[2] __section_vxtime_sequence;
static inline void do_vgettimeofday(struct timeval * tv)
{
	long sequence, t;
	unsigned long sec, usec;

	do {
		sequence = __vxtime_sequence[1];
		rmb();

		rdtscll(t);
		sec = __xtime.tv_sec;
		usec = __xtime.tv_usec +
			(__jiffies - __wall_jiffies) * (1000000 / HZ) +
			(t - __hpet.last_tsc) * (1000000 / HZ) / __hpet.ticks + __hpet.offset;

		rmb();
	} while (sequence != __vxtime_sequence[0]);

	tv->tv_sec = sec + usec / 1000000;
	tv->tv_usec = usec % 1000000;
}
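/*
 * This is the user-mode reader side of the vxtime sequence count: it computes
 * the same offset formula as the kernel's do_gettimeoffset(), but from the
 * read-only vsyscall-page copies (__hpet, __xtime, __jiffies, __wall_jiffies),
 * retrying whenever the sequence words indicate a concurrent kernel update.
 */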
static inline void do_get_tz(struct timezone * tz)
@@ -36,16 +36,14 @@ extern cycles_t cacheflush_time;
static inline cycles_t get_cycles (void)
{
	unsigned long long ret;

	rdtscll(ret);
	return ret;
}
extern unsigned int cpu_khz;
extern struct hpet_data hpet;
#endif
#ifndef _ASM_X86_64_VSYSCALL_H_
#define _ASM_X86_64_VSYSCALL_H_
#include <linux/time.h>
enum vsyscall_num {
__NR_vgettimeofday,
__NR_vtime,
@@ -13,32 +15,39 @@ enum vsyscall_num {
#ifdef __KERNEL__
#define __section_hpet __attribute__ ((unused, __section__ (".hpet"), aligned(16)))
#define __section_wall_jiffies __attribute__ ((unused, __section__ (".wall_jiffies"), aligned(16)))
#define __section_jiffies __attribute__ ((unused, __section__ (".jiffies"), aligned(16)))
#define __section_sys_tz __attribute__ ((unused, __section__ (".sys_tz"), aligned(16)))
#define __section_xtime __attribute__ ((unused, __section__ (".xtime"), aligned(16)))
#define __section_vxtime_sequence __attribute__ ((unused, __section__ (".vxtime_sequence"), aligned(16)))

struct hpet_data {
	long address;		/* base address */
	unsigned long hz;	/* HPET clocks / sec */
	int trigger;		/* value at last interrupt */
	int last;
	int offset;
	unsigned long last_tsc;
	long ticks;
};

#define hpet_readl(a)		readl(fix_to_virt(FIX_HPET_BASE) + a)
#define hpet_writel(d,a)	writel(d, fix_to_virt(FIX_HPET_BASE) + a)
/* vsyscall space (readonly) */
extern long __vxtime_sequence[2];
extern struct hpet_data __hpet;
extern struct timespec __xtime;
extern volatile unsigned long __jiffies;
extern unsigned long __wall_jiffies;
extern struct timezone __sys_tz;
/* kernel space (writeable) */
extern struct hpet_data hpet;
extern unsigned long wall_jiffies;
extern struct timezone sys_tz;
extern long vxtime_sequence[2];
#define vxtime_lock() do { vxtime_sequence[0]++; wmb(); } while(0)
#define vxtime_unlock() do { wmb(); vxtime_sequence[1]++; } while (0)