Commit 2b7e8ff7 authored by Andrew Morton, committed by Linus Torvalds

[PATCH] sched_clock() for ppc, ppc64, x86_64 and sparc64

Ingo's CPU scheduler update (in -mm kernels) needs a new sched_clock()
function which returns nanoseconds.

The patch provides implementations for ppc, ppc64, x86_64 and sparc64.

The x86_64 version could have overflow issues: the calculation is done in 32 bits
only, with a multiply.  But I hope it's good enough for the scheduler.

The ppc64 version needs scaling: it's only accurate for 1GHz CPUs.
parent f221af36
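A sketch (not part of the patch) of the pattern the ppc, sparc64 and x86_64 versions below reduce to: a fixed-point nanoseconds-per-tick scale is precomputed once, and sched_clock() then converts a free-running counter with a multiply and a shift. The shift width and 100 MHz counter frequency here are made-up example values, not taken from the patch.

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC	1000000000ULL
#define SHIFT		30	/* fractional bits in the scale factor */

static uint64_t ns_per_tick_scaled;	/* ns per counter tick, << SHIFT */

static void clock_init(uint64_t counter_hz)
{
	/* rounded division, as the sparc64 hunk does with (clock / 2) */
	ns_per_tick_scaled = ((NSEC_PER_SEC << SHIFT) + counter_hz / 2) / counter_hz;
}

/* the 64-bit product can wrap for large counter values; that is the
   overflow concern mentioned above for the x86_64 version */
static uint64_t toy_sched_clock(uint64_t counter)
{
	return (counter * ns_per_tick_scaled) >> SHIFT;
}

int main(void)
{
	clock_init(100000000ULL);	/* hypothetical 100 MHz counter */
	/* 250,000,000 ticks at 100 MHz is 2.5 s; prints 2500000000 ns */
	printf("%llu\n", (unsigned long long)toy_sched_clock(250000000ULL));
	return 0;
}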
@@ -83,6 +83,7 @@ time_t last_rtc_update;
 unsigned tb_ticks_per_jiffy;
 unsigned tb_to_us;
 unsigned tb_last_stamp;
+unsigned long tb_to_ns_scale;
 
 extern unsigned long wall_jiffies;
@@ -309,6 +310,7 @@ void __init time_init(void)
 		tb_to_us = 0x418937;
 	} else {
 		ppc_md.calibrate_decr();
+		tb_to_ns_scale = mulhwu(tb_to_us, 1000 << 10);
 	}
 
 	/* Now that the decrementer is calibrated, it can be used in case the
@@ -432,3 +434,26 @@ unsigned mulhwu_scale_factor(unsigned inscale, unsigned outscale) {
 	return mlt;
 }
+
+unsigned long long sched_clock(void)
+{
+	unsigned long lo, hi, hi2;
+	unsigned long long tb;
+
+	if (!__USE_RTC()) {
+		do {
+			hi = get_tbu();
+			lo = get_tbl();
+			hi2 = get_tbu();
+		} while (hi2 != hi);
+		tb = ((unsigned long long) hi << 32) | lo;
+		tb = (tb * tb_to_ns_scale) >> 10;
+	} else {
+		do {
+			hi = get_rtcu();
+			lo = get_rtcl();
+			hi2 = get_rtcu();
+		} while (hi2 != hi);
+		tb = ((unsigned long long) hi) * 1000000000 + lo;
+	}
+	return tb;
+}
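The ppc scale factor above folds the existing microsecond conversion into a nanosecond one: mulhwu(x, y) returns (x * y) >> 32, so tb_to_ns_scale ends up holding nanoseconds-per-tick scaled by 2^10, and sched_clock() only needs the final >> 10. A numerical sketch (not part of the patch) of that derivation, assuming, since this hunk does not show it, that tb_to_us is roughly (1000000 << 32) / timebase_frequency; the 66 MHz timebase is a made-up example.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t tb_freq = 66666666;	/* hypothetical timebase frequency, Hz */

	/* assumed model of the existing tick-to-microsecond scale factor */
	uint32_t tb_to_us = (uint32_t)((1000000ULL << 32) / tb_freq);

	/* mulhwu(tb_to_us, 1000 << 10) == (tb_to_us * 1024000) >> 32,
	   which works out to (nanoseconds per tick) << 10 */
	uint32_t tb_to_ns_scale = (uint32_t)(((uint64_t)tb_to_us * (1000 << 10)) >> 32);

	uint64_t tb = 123456789;			/* an arbitrary timebase reading */
	uint64_t ns = (tb * tb_to_ns_scale) >> 10;	/* what sched_clock() returns */

	printf("tb_to_ns_scale = %u (ns/tick ~ %.2f)\n",
	       (unsigned)tb_to_ns_scale, (double)tb_to_ns_scale / 1024.0);
	printf("ns = %llu vs exact %llu\n",
	       (unsigned long long)ns,
	       (unsigned long long)(tb * 1000000000ULL / tb_freq));
	return 0;
}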
@@ -307,6 +307,15 @@ int timer_interrupt(struct pt_regs * regs)
 	return 1;
 }
 
+/*
+ * Scheduler clock - returns current time in nanosec units.
+ *
+ * This is wrong, but my CPUs run at 1GHz, so nyer nyer.
+ */
+unsigned long long sched_clock(void)
+{
+	return get_tb();
+}
+
 /*
  * This version of gettimeofday has microsecond resolution.
...
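The ppc64 comment above is doing real work: get_tb() counts timebase ticks, so the returned value is only nanoseconds when the timebase runs at exactly 1 GHz. A toy calculation (not part of the patch) of how far off it is at another rate; the 333 MHz figure is a made-up example.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t tb_freq = 333333333;	/* hypothetical non-1GHz timebase, Hz */
	uint64_t ticks = tb_freq * 10;	/* 10 seconds worth of timebase ticks */

	/* returning raw ticks as "nanoseconds" runs slow by a factor of 1e9/tb_freq */
	uint64_t actual_ns = ticks * 1000000000ULL / tb_freq;

	printf("sched_clock() would return %llu, real elapsed ns = %llu\n",
	       (unsigned long long)ticks, (unsigned long long)actual_ns);
	return 0;
}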
@@ -416,6 +416,7 @@ unsigned long timer_tick_offset;
 unsigned long timer_tick_compare;
 
 static unsigned long timer_ticks_per_usec_quotient;
+static unsigned long timer_ticks_per_nsec_quotient;
 
 #define TICK_SIZE (tick_nsec / 1000)
@@ -1051,12 +1052,18 @@ static struct notifier_block sparc64_cpufreq_notifier_block = {
 #endif
 
 /* The quotient formula is taken from the IA64 port. */
+#define SPARC64_USEC_PER_CYC_SHIFT	30UL
+#define SPARC64_NSEC_PER_CYC_SHIFT	30UL
 void __init time_init(void)
 {
 	unsigned long clock = sparc64_init_timers(timer_interrupt);
 
 	timer_ticks_per_usec_quotient =
-		(((1000000UL << 30) +
+		(((1000000UL << SPARC64_USEC_PER_CYC_SHIFT) +
 		  (clock / 2)) / clock);
+
+	timer_ticks_per_nsec_quotient =
+		(((NSEC_PER_SEC << SPARC64_NSEC_PER_CYC_SHIFT) +
+		  (clock / 2)) / clock);
 
 #ifdef CONFIG_CPU_FREQ
@@ -1072,7 +1079,16 @@ static __inline__ unsigned long do_gettimeoffset(void)
 	ticks += timer_tick_offset;
 	ticks -= timer_tick_compare;
 
-	return (ticks * timer_ticks_per_usec_quotient) >> 30UL;
+	return (ticks * timer_ticks_per_usec_quotient)
+		>> SPARC64_USEC_PER_CYC_SHIFT;
+}
+
+unsigned long long sched_clock(void)
+{
+	unsigned long ticks = tick_ops->get_tick();
+
+	return (ticks * timer_ticks_per_nsec_quotient)
+		>> SPARC64_NSEC_PER_CYC_SHIFT;
 }
 
 int do_settimeofday(struct timespec *tv)
...
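In the sparc64 hunk above, the new nanosecond quotient is roughly the existing microsecond quotient scaled by 1000, and both use the same 30-bit shift, so sched_clock() and do_gettimeoffset() stay consistent. A small arithmetic check (not part of the patch) of that relationship, using a made-up 100 MHz tick rate in place of whatever sparc64_init_timers() reports.

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC	1000000000ULL
#define SHIFT		30	/* both *_PER_CYC_SHIFT values in the patch */

int main(void)
{
	uint64_t clock = 100000000;	/* hypothetical tick frequency, Hz */

	uint64_t usec_quot = (((uint64_t)1000000 << SHIFT) + clock / 2) / clock;
	uint64_t nsec_quot = ((NSEC_PER_SEC << SHIFT) + clock / 2) / clock;

	uint64_t ticks = 250000000;	/* 2.5 s worth of ticks at 100 MHz */

	printf("usec_quot = %llu, nsec_quot = %llu (about 1000x larger)\n",
	       (unsigned long long)usec_quot, (unsigned long long)nsec_quot);
	/* ns comes out as exactly 2500000000 here; us truncates to 2499999 */
	printf("us = %llu, ns = %llu\n",
	       (unsigned long long)((ticks * usec_quot) >> SHIFT),
	       (unsigned long long)((ticks * nsec_quot) >> SHIFT));
	return 0;
}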
@@ -370,6 +370,19 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
 	return IRQ_HANDLED;
 }
 
+/* RED-PEN: calculation is done in 32bits with multiply for performance
+   and could overflow, it may be better (but slower) to use an 64bit division. */
+unsigned long long sched_clock(void)
+{
+	unsigned long a;
+
+	if (__vxtime.mode == VXTIME_HPET)
+		return (hpet_readl(HPET_COUNTER) * vxtime.quot) >> 32;
+
+	rdtscll(a);
+	return (a * vxtime.tsc_quot) >> 32;
+}
+
 unsigned long get_cmos_time(void)
 {
 	unsigned int timeout, year, mon, day, hour, min, sec;
...
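The RED-PEN note above is about the size of that product: the multiply happens in 64-bit registers, and if the quotient carries 32 fractional bits the product wraps once roughly 2^32 nanoseconds (a few seconds) worth of counter has accumulated. This hunk does not show how vxtime.tsc_quot is computed; the sketch below (not part of the patch) simply assumes a nanoseconds-per-cycle value in 32.32 fixed point and a made-up 2 GHz TSC to put a number on the wrap horizon.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t cpu_hz = 2000000000ULL;		/* hypothetical TSC rate */
	uint64_t quot = (1000000000ULL << 32) / cpu_hz;	/* assumed ns per cycle, << 32 */

	/* cycles * quot wraps 64 bits when it reaches 2^64, i.e. when
	   cycles >= 2^64 / quot; since quot ~= ns_per_cycle << 32, that
	   happens once about 2^32 ns (~4.3 s) have been counted */
	uint64_t wrap_cycles = UINT64_MAX / quot;
	double wrap_seconds = (double)wrap_cycles / (double)cpu_hz;

	printf("quot = %llu, wraps after ~%llu cycles (~%.1f s)\n",
	       (unsigned long long)quot,
	       (unsigned long long)wrap_cycles, wrap_seconds);
	return 0;
}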
@@ -97,6 +97,13 @@ extern __inline__ unsigned long get_rtcl(void) {
 	return rtcl;
 }
 
+extern __inline__ unsigned long get_rtcu(void)
+{
+	unsigned long rtcu;
+	asm volatile("mfrtcu %0" : "=r" (rtcu));
+	return rtcu;
+}
+
 extern __inline__ unsigned get_native_tbl(void) {
 	if (__USE_RTC())
 		return get_rtcl();
@@ -140,6 +147,7 @@ extern __inline__ unsigned binary_tbl(void) {
 #endif
 
 /* Use mulhwu to scale processor timebase to timeval */
+/* Specifically, this computes (x * y) / 2^32. -- paulus */
 #define mulhwu(x,y) \
 ({unsigned z; asm ("mulhwu %0,%1,%2" : "=r" (z) : "r" (x), "r" (y)); z;})
...
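The new comment on mulhwu pins down its semantics: it returns the high 32 bits of a 32x32-bit multiply, i.e. (x * y) / 2^32. A portable C model of it for reference (mulhwu_c is a hypothetical name, not from the kernel).

#include <stdint.h>
#include <stdio.h>

static uint32_t mulhwu_c(uint32_t x, uint32_t y)
{
	/* high word of the full 64-bit product, i.e. (x * y) >> 32 */
	return (uint32_t)(((uint64_t)x * y) >> 32);
}

int main(void)
{
	/* 0x80000000 * 3 = 0x180000000, whose high 32 bits are 1 */
	printf("%u\n", (unsigned)mulhwu_c(0x80000000u, 3u));
	return 0;
}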