Commit f12a15be authored by John Stultz, committed by Thomas Gleixner

x86: Convert common clocksources to use clocksource_register_hz/khz

This converts the most common of the x86 clocksources over to use
clocksource_register_hz/khz.
Signed-off-by: John Stultz <johnstul@us.ibm.com>
LKML-Reference: <1279068988-21864-11-git-send-email-johnstul@us.ibm.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent 0fb86b06
...@@ -16,7 +16,6 @@ ...@@ -16,7 +16,6 @@
#include <asm/hpet.h> #include <asm/hpet.h>
#define HPET_MASK CLOCKSOURCE_MASK(32) #define HPET_MASK CLOCKSOURCE_MASK(32)
#define HPET_SHIFT 22
/* FSEC = 10^-15 /* FSEC = 10^-15
NSEC = 10^-9 */ NSEC = 10^-9 */
...@@ -787,7 +786,6 @@ static struct clocksource clocksource_hpet = { ...@@ -787,7 +786,6 @@ static struct clocksource clocksource_hpet = {
.rating = 250, .rating = 250,
.read = read_hpet, .read = read_hpet,
.mask = HPET_MASK, .mask = HPET_MASK,
.shift = HPET_SHIFT,
.flags = CLOCK_SOURCE_IS_CONTINUOUS, .flags = CLOCK_SOURCE_IS_CONTINUOUS,
.resume = hpet_resume_counter, .resume = hpet_resume_counter,
#ifdef CONFIG_X86_64 #ifdef CONFIG_X86_64
...@@ -798,6 +796,7 @@ static struct clocksource clocksource_hpet = { ...@@ -798,6 +796,7 @@ static struct clocksource clocksource_hpet = {
static int hpet_clocksource_register(void) static int hpet_clocksource_register(void)
{ {
u64 start, now; u64 start, now;
u64 hpet_freq;
cycle_t t1; cycle_t t1;
/* Start the counter */ /* Start the counter */
...@@ -832,9 +831,15 @@ static int hpet_clocksource_register(void) ...@@ -832,9 +831,15 @@ static int hpet_clocksource_register(void)
* mult = (hpet_period * 2^shift)/10^6 * mult = (hpet_period * 2^shift)/10^6
* mult = (hpet_period << shift)/FSEC_PER_NSEC * mult = (hpet_period << shift)/FSEC_PER_NSEC
*/ */
clocksource_hpet.mult = div_sc(hpet_period, FSEC_PER_NSEC, HPET_SHIFT);
clocksource_register(&clocksource_hpet); /* Need to convert hpet_period (fsec/cyc) to cyc/sec:
*
* cyc/sec = FSEC_PER_SEC/hpet_period(fsec/cyc)
* cyc/sec = (FSEC_PER_NSEC * NSEC_PER_SEC)/hpet_period
*/
hpet_freq = FSEC_PER_NSEC * NSEC_PER_SEC;
do_div(hpet_freq, hpet_period);
clocksource_register_hz(&clocksource_hpet, (u32)hpet_freq);
return 0; return 0;
} }
......
...@@ -751,7 +751,6 @@ static struct clocksource clocksource_tsc = { ...@@ -751,7 +751,6 @@ static struct clocksource clocksource_tsc = {
.read = read_tsc, .read = read_tsc,
.resume = resume_tsc, .resume = resume_tsc,
.mask = CLOCKSOURCE_MASK(64), .mask = CLOCKSOURCE_MASK(64),
.shift = 22,
.flags = CLOCK_SOURCE_IS_CONTINUOUS | .flags = CLOCK_SOURCE_IS_CONTINUOUS |
CLOCK_SOURCE_MUST_VERIFY, CLOCK_SOURCE_MUST_VERIFY,
#ifdef CONFIG_X86_64 #ifdef CONFIG_X86_64
...@@ -845,8 +844,6 @@ __cpuinit int unsynchronized_tsc(void) ...@@ -845,8 +844,6 @@ __cpuinit int unsynchronized_tsc(void)
static void __init init_tsc_clocksource(void) static void __init init_tsc_clocksource(void)
{ {
clocksource_tsc.mult = clocksource_khz2mult(tsc_khz,
clocksource_tsc.shift);
if (tsc_clocksource_reliable) if (tsc_clocksource_reliable)
clocksource_tsc.flags &= ~CLOCK_SOURCE_MUST_VERIFY; clocksource_tsc.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
/* lower the rating if we already know its unstable: */ /* lower the rating if we already know its unstable: */
...@@ -854,7 +851,7 @@ static void __init init_tsc_clocksource(void) ...@@ -854,7 +851,7 @@ static void __init init_tsc_clocksource(void)
clocksource_tsc.rating = 0; clocksource_tsc.rating = 0;
clocksource_tsc.flags &= ~CLOCK_SOURCE_IS_CONTINUOUS; clocksource_tsc.flags &= ~CLOCK_SOURCE_IS_CONTINUOUS;
} }
clocksource_register(&clocksource_tsc); clocksource_register_khz(&clocksource_tsc, tsc_khz);
} }
#ifdef CONFIG_X86_64 #ifdef CONFIG_X86_64
......
...@@ -68,10 +68,7 @@ static struct clocksource clocksource_acpi_pm = { ...@@ -68,10 +68,7 @@ static struct clocksource clocksource_acpi_pm = {
.rating = 200, .rating = 200,
.read = acpi_pm_read, .read = acpi_pm_read,
.mask = (cycle_t)ACPI_PM_MASK, .mask = (cycle_t)ACPI_PM_MASK,
.mult = 0, /*to be calculated*/
.shift = 22,
.flags = CLOCK_SOURCE_IS_CONTINUOUS, .flags = CLOCK_SOURCE_IS_CONTINUOUS,
}; };
...@@ -190,9 +187,6 @@ static int __init init_acpi_pm_clocksource(void) ...@@ -190,9 +187,6 @@ static int __init init_acpi_pm_clocksource(void)
if (!pmtmr_ioport) if (!pmtmr_ioport)
return -ENODEV; return -ENODEV;
clocksource_acpi_pm.mult = clocksource_hz2mult(PMTMR_TICKS_PER_SEC,
clocksource_acpi_pm.shift);
/* "verify" this timing source: */ /* "verify" this timing source: */
for (j = 0; j < ACPI_PM_MONOTONICITY_CHECKS; j++) { for (j = 0; j < ACPI_PM_MONOTONICITY_CHECKS; j++) {
udelay(100 * j); udelay(100 * j);
...@@ -220,7 +214,8 @@ static int __init init_acpi_pm_clocksource(void) ...@@ -220,7 +214,8 @@ static int __init init_acpi_pm_clocksource(void)
if (verify_pmtmr_rate() != 0) if (verify_pmtmr_rate() != 0)
return -ENODEV; return -ENODEV;
return clocksource_register(&clocksource_acpi_pm); return clocksource_register_hz(&clocksource_acpi_pm,
PMTMR_TICKS_PER_SEC);
} }
/* We use fs_initcall because we want the PCI fixups to have run /* We use fs_initcall because we want the PCI fixups to have run
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment