Commit a03fdb76 authored by Linus Torvalds's avatar Linus Torvalds

Merge branch 'timers-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'timers-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (34 commits)
  time: Prevent 32 bit overflow with set_normalized_timespec()
  clocksource: Delay clocksource down rating to late boot
  clocksource: clocksource_select must be called with mutex locked
  clocksource: Resolve cpu hotplug dead lock with TSC unstable, fix crash
  timers: Drop a function prototype
  clocksource: Resolve cpu hotplug dead lock with TSC unstable
  timer.c: Fix S/390 comments
  timekeeping: Fix invalid getboottime() value
  timekeeping: Fix up read_persistent_clock() breakage on sh
  timekeeping: Increase granularity of read_persistent_clock(), build fix
  time: Introduce CLOCK_REALTIME_COARSE
  x86: Do not unregister PIT clocksource on PIT oneshot setup/shutdown
  clocksource: Avoid clocksource watchdog circular locking dependency
  clocksource: Protect the watchdog rating changes with clocksource_mutex
  clocksource: Call clocksource_change_rating() outside of watchdog_lock
  timekeeping: Introduce read_boot_clock
  timekeeping: Increase granularity of read_persistent_clock()
  timekeeping: Update clocksource with stop_machine
  timekeeping: Add timekeeper read_clock helper functions
  timekeeping: Move NTP adjusted clock multiplier to struct timekeeper
  ...

Fix trivial conflict due to MIPS lemote -> loongson renaming.
parents 202c4675 12e09337
...@@ -253,11 +253,8 @@ static struct clocksource clocksource_32k = { ...@@ -253,11 +253,8 @@ static struct clocksource clocksource_32k = {
*/ */
unsigned long long sched_clock(void) unsigned long long sched_clock(void)
{ {
unsigned long long ret; return clocksource_cyc2ns(clocksource_32k.read(&clocksource_32k),
clocksource_32k.mult, clocksource_32k.shift);
ret = (unsigned long long)clocksource_32k.read(&clocksource_32k);
ret = (ret * clocksource_32k.mult_orig) >> clocksource_32k.shift;
return ret;
} }
static int __init omap_init_clocksource_32k(void) static int __init omap_init_clocksource_32k(void)
......
...@@ -72,9 +72,10 @@ static unsigned long read_rtc_mmss(void) ...@@ -72,9 +72,10 @@ static unsigned long read_rtc_mmss(void)
return mktime(year, mon, day, hour, min, sec); return mktime(year, mon, day, hour, min, sec);
} }
unsigned long read_persistent_clock(void) void read_persistent_clock(struct timespec *ts)
{ {
return read_rtc_mmss(); ts->tv_sec = read_rtc_mmss();
ts->tv_nsec = 0;
} }
int update_persistent_clock(struct timespec now) int update_persistent_clock(struct timespec now)
......
...@@ -18,7 +18,7 @@ ...@@ -18,7 +18,7 @@
#include <asm/dec/ioasic.h> #include <asm/dec/ioasic.h>
#include <asm/dec/machtype.h> #include <asm/dec/machtype.h>
unsigned long read_persistent_clock(void) void read_persistent_clock(struct timespec *ts)
{ {
unsigned int year, mon, day, hour, min, sec, real_year; unsigned int year, mon, day, hour, min, sec, real_year;
unsigned long flags; unsigned long flags;
...@@ -53,7 +53,8 @@ unsigned long read_persistent_clock(void) ...@@ -53,7 +53,8 @@ unsigned long read_persistent_clock(void)
year += real_year - 72 + 2000; year += real_year - 72 + 2000;
return mktime(year, mon, day, hour, min, sec); ts->tv_sec = mktime(year, mon, day, hour, min, sec);
ts->tv_nsec = 0;
} }
/* /*
......
...@@ -135,7 +135,7 @@ static void rtc_end_op(void) ...@@ -135,7 +135,7 @@ static void rtc_end_op(void)
lasat_ndelay(1000); lasat_ndelay(1000);
} }
unsigned long read_persistent_clock(void) void read_persistent_clock(struct timespec *ts)
{ {
unsigned long word; unsigned long word;
unsigned long flags; unsigned long flags;
...@@ -147,7 +147,8 @@ unsigned long read_persistent_clock(void) ...@@ -147,7 +147,8 @@ unsigned long read_persistent_clock(void)
rtc_end_op(); rtc_end_op();
spin_unlock_irqrestore(&rtc_lock, flags); spin_unlock_irqrestore(&rtc_lock, flags);
return word; ts->tv_sec = word;
ts->tv_nsec = 0;
} }
int rtc_mips_set_mmss(unsigned long time) int rtc_mips_set_mmss(unsigned long time)
......
...@@ -92,10 +92,12 @@ static int rtctmp; ...@@ -92,10 +92,12 @@ static int rtctmp;
int proc_dolasatrtc(ctl_table *table, int write, struct file *filp, int proc_dolasatrtc(ctl_table *table, int write, struct file *filp,
void *buffer, size_t *lenp, loff_t *ppos) void *buffer, size_t *lenp, loff_t *ppos)
{ {
struct timespec ts;
int r; int r;
if (!write) { if (!write) {
rtctmp = read_persistent_clock(); read_persistent_clock(&ts);
rtctmp = ts.tv_sec;
/* check for time < 0 and set to 0 */ /* check for time < 0 and set to 0 */
if (rtctmp < 0) if (rtctmp < 0)
rtctmp = 0; rtctmp = 0;
...@@ -134,9 +136,11 @@ int sysctl_lasat_rtc(ctl_table *table, ...@@ -134,9 +136,11 @@ int sysctl_lasat_rtc(ctl_table *table,
void *oldval, size_t *oldlenp, void *oldval, size_t *oldlenp,
void *newval, size_t newlen) void *newval, size_t newlen)
{ {
struct timespec ts;
int r; int r;
rtctmp = read_persistent_clock(); read_persistent_clock(&ts);
rtctmp = ts.tv_sec;
if (rtctmp < 0) if (rtctmp < 0)
rtctmp = 0; rtctmp = 0;
r = sysctl_intvec(table, oldval, oldlenp, newval, newlen); r = sysctl_intvec(table, oldval, oldlenp, newval, newlen);
......
...@@ -21,7 +21,8 @@ void __init plat_time_init(void) ...@@ -21,7 +21,8 @@ void __init plat_time_init(void)
mips_hpt_frequency = cpu_clock_freq / 2; mips_hpt_frequency = cpu_clock_freq / 2;
} }
unsigned long read_persistent_clock(void) void read_persistent_clock(struct timespec *ts)
{ {
return mc146818_get_cmos_time(); ts->tv_sec = mc146818_get_cmos_time();
ts->tv_nsec = 0;
} }
...@@ -100,9 +100,10 @@ static unsigned int __init estimate_cpu_frequency(void) ...@@ -100,9 +100,10 @@ static unsigned int __init estimate_cpu_frequency(void)
return count; return count;
} }
unsigned long read_persistent_clock(void) void read_persistent_clock(struct timespec *ts)
{ {
return mc146818_get_cmos_time(); ts->tv_sec = mc146818_get_cmos_time();
ts->tv_nsec = 0;
} }
static void __init plat_perf_setup(void) static void __init plat_perf_setup(void)
......
...@@ -70,7 +70,7 @@ void __init bus_error_init(void) ...@@ -70,7 +70,7 @@ void __init bus_error_init(void)
} }
unsigned long read_persistent_clock(void) void read_persistent_clock(struct timespec *ts)
{ {
unsigned int year, month, day, hour, min, sec; unsigned int year, month, day, hour, min, sec;
unsigned long flags; unsigned long flags;
...@@ -92,7 +92,8 @@ unsigned long read_persistent_clock(void) ...@@ -92,7 +92,8 @@ unsigned long read_persistent_clock(void)
m48t37_base->control = 0x00; m48t37_base->control = 0x00;
spin_unlock_irqrestore(&rtc_lock, flags); spin_unlock_irqrestore(&rtc_lock, flags);
return mktime(year, month, day, hour, min, sec); ts->tv_sec = mktime(year, month, day, hour, min, sec);
ts->tv_nsec = 0;
} }
int rtc_mips_set_time(unsigned long tim) int rtc_mips_set_time(unsigned long tim)
......
...@@ -87,19 +87,26 @@ enum swarm_rtc_type { ...@@ -87,19 +87,26 @@ enum swarm_rtc_type {
enum swarm_rtc_type swarm_rtc_type; enum swarm_rtc_type swarm_rtc_type;
unsigned long read_persistent_clock(void) void read_persistent_clock(struct timespec *ts)
{ {
unsigned long sec;
switch (swarm_rtc_type) { switch (swarm_rtc_type) {
case RTC_XICOR: case RTC_XICOR:
return xicor_get_time(); sec = xicor_get_time();
break;
case RTC_M4LT81: case RTC_M4LT81:
return m41t81_get_time(); sec = m41t81_get_time();
break;
case RTC_NONE: case RTC_NONE:
default: default:
return mktime(2000, 1, 1, 0, 0, 0); sec = mktime(2000, 1, 1, 0, 0, 0);
break;
} }
ts->tv_sec = sec;
ts->tv_nsec = 0;
} }
int rtc_mips_set_time(unsigned long sec) int rtc_mips_set_time(unsigned long sec)
......
...@@ -182,7 +182,8 @@ void __init plat_time_init(void) ...@@ -182,7 +182,8 @@ void __init plat_time_init(void)
setup_pit_timer(); setup_pit_timer();
} }
unsigned long read_persistent_clock(void) void read_persistent_clock(struct timespec *ts)
{ {
return -1; ts->tv_sec = -1;
ts->tv_nsec = 0;
} }
...@@ -774,11 +774,12 @@ int update_persistent_clock(struct timespec now) ...@@ -774,11 +774,12 @@ int update_persistent_clock(struct timespec now)
return ppc_md.set_rtc_time(&tm); return ppc_md.set_rtc_time(&tm);
} }
unsigned long read_persistent_clock(void) void read_persistent_clock(struct timespec *ts)
{ {
struct rtc_time tm; struct rtc_time tm;
static int first = 1; static int first = 1;
ts->tv_nsec = 0;
/* XXX this is a little fragile but will work okay in the short term */ /* XXX this is a little fragile but will work okay in the short term */
if (first) { if (first) {
first = 0; first = 0;
...@@ -786,14 +787,18 @@ unsigned long read_persistent_clock(void) ...@@ -786,14 +787,18 @@ unsigned long read_persistent_clock(void)
timezone_offset = ppc_md.time_init(); timezone_offset = ppc_md.time_init();
/* get_boot_time() isn't guaranteed to be safe to call late */ /* get_boot_time() isn't guaranteed to be safe to call late */
if (ppc_md.get_boot_time) if (ppc_md.get_boot_time) {
return ppc_md.get_boot_time() -timezone_offset; ts->tv_sec = ppc_md.get_boot_time() - timezone_offset;
return;
}
}
if (!ppc_md.get_rtc_time) {
ts->tv_sec = 0;
return;
} }
if (!ppc_md.get_rtc_time)
return 0;
ppc_md.get_rtc_time(&tm); ppc_md.get_rtc_time(&tm);
return mktime(tm.tm_year+1900, tm.tm_mon+1, tm.tm_mday, ts->tv_sec = mktime(tm.tm_year+1900, tm.tm_mon+1, tm.tm_mday,
tm.tm_hour, tm.tm_min, tm.tm_sec); tm.tm_hour, tm.tm_min, tm.tm_sec);
} }
/* clocksource code */ /* clocksource code */
......
...@@ -184,12 +184,14 @@ static void timing_alert_interrupt(__u16 code) ...@@ -184,12 +184,14 @@ static void timing_alert_interrupt(__u16 code)
static void etr_reset(void); static void etr_reset(void);
static void stp_reset(void); static void stp_reset(void);
unsigned long read_persistent_clock(void) void read_persistent_clock(struct timespec *ts)
{ {
struct timespec ts; tod_to_timeval(get_clock() - TOD_UNIX_EPOCH, ts);
}
tod_to_timeval(get_clock() - TOD_UNIX_EPOCH, &ts); void read_boot_clock(struct timespec *ts)
return ts.tv_sec; {
tod_to_timeval(sched_clock_base_cc - TOD_UNIX_EPOCH, ts);
} }
static cycle_t read_tod_clock(struct clocksource *cs) static cycle_t read_tod_clock(struct clocksource *cs)
...@@ -207,6 +209,10 @@ static struct clocksource clocksource_tod = { ...@@ -207,6 +209,10 @@ static struct clocksource clocksource_tod = {
.flags = CLOCK_SOURCE_IS_CONTINUOUS, .flags = CLOCK_SOURCE_IS_CONTINUOUS,
}; };
struct clocksource * __init clocksource_default_clock(void)
{
return &clocksource_tod;
}
void update_vsyscall(struct timespec *wall_time, struct clocksource *clock) void update_vsyscall(struct timespec *wall_time, struct clocksource *clock)
{ {
...@@ -244,10 +250,6 @@ void update_vsyscall_tz(void) ...@@ -244,10 +250,6 @@ void update_vsyscall_tz(void)
*/ */
void __init time_init(void) void __init time_init(void)
{ {
struct timespec ts;
unsigned long flags;
cycle_t now;
/* Reset time synchronization interfaces. */ /* Reset time synchronization interfaces. */
etr_reset(); etr_reset();
stp_reset(); stp_reset();
...@@ -263,26 +265,6 @@ void __init time_init(void) ...@@ -263,26 +265,6 @@ void __init time_init(void)
if (clocksource_register(&clocksource_tod) != 0) if (clocksource_register(&clocksource_tod) != 0)
panic("Could not register TOD clock source"); panic("Could not register TOD clock source");
/*
* The TOD clock is an accurate clock. The xtime should be
* initialized in a way that the difference between TOD and
* xtime is reasonably small. Too bad that timekeeping_init
* sets xtime.tv_nsec to zero. In addition the clock source
* change from the jiffies clock source to the TOD clock
* source add another error of up to 1/HZ second. The same
* function sets wall_to_monotonic to a value that is too
* small for /proc/uptime to be accurate.
* Reset xtime and wall_to_monotonic to sane values.
*/
write_seqlock_irqsave(&xtime_lock, flags);
now = get_clock();
tod_to_timeval(now - TOD_UNIX_EPOCH, &xtime);
clocksource_tod.cycle_last = now;
clocksource_tod.raw_time = xtime;
tod_to_timeval(sched_clock_base_cc - TOD_UNIX_EPOCH, &ts);
set_normalized_timespec(&wall_to_monotonic, -ts.tv_sec, -ts.tv_nsec);
write_sequnlock_irqrestore(&xtime_lock, flags);
/* Enable TOD clock interrupts on the boot cpu. */ /* Enable TOD clock interrupts on the boot cpu. */
init_cpu_timer(); init_cpu_timer();
......
...@@ -39,11 +39,9 @@ void (*rtc_sh_get_time)(struct timespec *) = null_rtc_get_time; ...@@ -39,11 +39,9 @@ void (*rtc_sh_get_time)(struct timespec *) = null_rtc_get_time;
int (*rtc_sh_set_time)(const time_t) = null_rtc_set_time; int (*rtc_sh_set_time)(const time_t) = null_rtc_set_time;
#ifdef CONFIG_GENERIC_CMOS_UPDATE #ifdef CONFIG_GENERIC_CMOS_UPDATE
unsigned long read_persistent_clock(void) void read_persistent_clock(struct timespec *ts)
{ {
struct timespec tv; rtc_sh_get_time(ts);
rtc_sh_get_time(&tv);
return tv.tv_sec;
} }
int update_persistent_clock(struct timespec now) int update_persistent_clock(struct timespec now)
......
...@@ -21,6 +21,7 @@ struct vsyscall_gtod_data { ...@@ -21,6 +21,7 @@ struct vsyscall_gtod_data {
u32 shift; u32 shift;
} clock; } clock;
struct timespec wall_to_monotonic; struct timespec wall_to_monotonic;
struct timespec wall_time_coarse;
}; };
extern struct vsyscall_gtod_data __vsyscall_gtod_data extern struct vsyscall_gtod_data __vsyscall_gtod_data
__section_vsyscall_gtod_data; __section_vsyscall_gtod_data;
......
...@@ -19,12 +19,6 @@ ...@@ -19,12 +19,6 @@
DEFINE_SPINLOCK(i8253_lock); DEFINE_SPINLOCK(i8253_lock);
EXPORT_SYMBOL(i8253_lock); EXPORT_SYMBOL(i8253_lock);
#ifdef CONFIG_X86_32
static void pit_disable_clocksource(void);
#else
static inline void pit_disable_clocksource(void) { }
#endif
/* /*
* HPET replaces the PIT, when enabled. So we need to know, which of * HPET replaces the PIT, when enabled. So we need to know, which of
* the two timers is used * the two timers is used
...@@ -57,12 +51,10 @@ static void init_pit_timer(enum clock_event_mode mode, ...@@ -57,12 +51,10 @@ static void init_pit_timer(enum clock_event_mode mode,
outb_pit(0, PIT_CH0); outb_pit(0, PIT_CH0);
outb_pit(0, PIT_CH0); outb_pit(0, PIT_CH0);
} }
pit_disable_clocksource();
break; break;
case CLOCK_EVT_MODE_ONESHOT: case CLOCK_EVT_MODE_ONESHOT:
/* One shot setup */ /* One shot setup */
pit_disable_clocksource();
outb_pit(0x38, PIT_MODE); outb_pit(0x38, PIT_MODE);
break; break;
...@@ -200,17 +192,6 @@ static struct clocksource pit_cs = { ...@@ -200,17 +192,6 @@ static struct clocksource pit_cs = {
.shift = 20, .shift = 20,
}; };
static void pit_disable_clocksource(void)
{
/*
* Use mult to check whether it is registered or not
*/
if (pit_cs.mult) {
clocksource_unregister(&pit_cs);
pit_cs.mult = 0;
}
}
static int __init init_pit_clocksource(void) static int __init init_pit_clocksource(void)
{ {
/* /*
......
...@@ -178,7 +178,7 @@ static int set_rtc_mmss(unsigned long nowtime) ...@@ -178,7 +178,7 @@ static int set_rtc_mmss(unsigned long nowtime)
} }
/* not static: needed by APM */ /* not static: needed by APM */
unsigned long read_persistent_clock(void) void read_persistent_clock(struct timespec *ts)
{ {
unsigned long retval, flags; unsigned long retval, flags;
...@@ -186,7 +186,8 @@ unsigned long read_persistent_clock(void) ...@@ -186,7 +186,8 @@ unsigned long read_persistent_clock(void)
retval = get_wallclock(); retval = get_wallclock();
spin_unlock_irqrestore(&rtc_lock, flags); spin_unlock_irqrestore(&rtc_lock, flags);
return retval; ts->tv_sec = retval;
ts->tv_nsec = 0;
} }
int update_persistent_clock(struct timespec now) int update_persistent_clock(struct timespec now)
......
...@@ -744,10 +744,16 @@ static cycle_t __vsyscall_fn vread_tsc(void) ...@@ -744,10 +744,16 @@ static cycle_t __vsyscall_fn vread_tsc(void)
} }
#endif #endif
static void resume_tsc(void)
{
clocksource_tsc.cycle_last = 0;
}
static struct clocksource clocksource_tsc = { static struct clocksource clocksource_tsc = {
.name = "tsc", .name = "tsc",
.rating = 300, .rating = 300,
.read = read_tsc, .read = read_tsc,
.resume = resume_tsc,
.mask = CLOCKSOURCE_MASK(64), .mask = CLOCKSOURCE_MASK(64),
.shift = 22, .shift = 22,
.flags = CLOCK_SOURCE_IS_CONTINUOUS | .flags = CLOCK_SOURCE_IS_CONTINUOUS |
...@@ -761,12 +767,14 @@ void mark_tsc_unstable(char *reason) ...@@ -761,12 +767,14 @@ void mark_tsc_unstable(char *reason)
{ {
if (!tsc_unstable) { if (!tsc_unstable) {
tsc_unstable = 1; tsc_unstable = 1;
printk("Marking TSC unstable due to %s\n", reason); printk(KERN_INFO "Marking TSC unstable due to %s\n", reason);
/* Change only the rating, when not registered */ /* Change only the rating, when not registered */
if (clocksource_tsc.mult) if (clocksource_tsc.mult)
clocksource_change_rating(&clocksource_tsc, 0); clocksource_mark_unstable(&clocksource_tsc);
else else {
clocksource_tsc.flags |= CLOCK_SOURCE_UNSTABLE;
clocksource_tsc.rating = 0; clocksource_tsc.rating = 0;
}
} }
} }
......
...@@ -87,6 +87,7 @@ void update_vsyscall(struct timespec *wall_time, struct clocksource *clock) ...@@ -87,6 +87,7 @@ void update_vsyscall(struct timespec *wall_time, struct clocksource *clock)
vsyscall_gtod_data.wall_time_sec = wall_time->tv_sec; vsyscall_gtod_data.wall_time_sec = wall_time->tv_sec;
vsyscall_gtod_data.wall_time_nsec = wall_time->tv_nsec; vsyscall_gtod_data.wall_time_nsec = wall_time->tv_nsec;
vsyscall_gtod_data.wall_to_monotonic = wall_to_monotonic; vsyscall_gtod_data.wall_to_monotonic = wall_to_monotonic;
vsyscall_gtod_data.wall_time_coarse = __current_kernel_time();
write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags); write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags);
} }
......
...@@ -86,14 +86,47 @@ notrace static noinline int do_monotonic(struct timespec *ts) ...@@ -86,14 +86,47 @@ notrace static noinline int do_monotonic(struct timespec *ts)
return 0; return 0;
} }
notrace static noinline int do_realtime_coarse(struct timespec *ts)
{
unsigned long seq;
do {
seq = read_seqbegin(&gtod->lock);
ts->tv_sec = gtod->wall_time_coarse.tv_sec;
ts->tv_nsec = gtod->wall_time_coarse.tv_nsec;
} while (unlikely(read_seqretry(&gtod->lock, seq)));
return 0;
}
notrace static noinline int do_monotonic_coarse(struct timespec *ts)
{
unsigned long seq, ns, secs;
do {
seq = read_seqbegin(&gtod->lock);
secs = gtod->wall_time_coarse.tv_sec;
ns = gtod->wall_time_coarse.tv_nsec;
secs += gtod->wall_to_monotonic.tv_sec;
ns += gtod->wall_to_monotonic.tv_nsec;
} while (unlikely(read_seqretry(&gtod->lock, seq)));
vset_normalized_timespec(ts, secs, ns);
return 0;
}
notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts) notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
{ {
if (likely(gtod->sysctl_enabled && gtod->clock.vread)) if (likely(gtod->sysctl_enabled))
switch (clock) { switch (clock) {
case CLOCK_REALTIME: case CLOCK_REALTIME:
return do_realtime(ts); if (likely(gtod->clock.vread))
return do_realtime(ts);
break;
case CLOCK_MONOTONIC: case CLOCK_MONOTONIC:
return do_monotonic(ts); if (likely(gtod->clock.vread))
return do_monotonic(ts);
break;
case CLOCK_REALTIME_COARSE:
return do_realtime_coarse(ts);
case CLOCK_MONOTONIC_COARSE:
return do_monotonic_coarse(ts);
} }
return vdso_fallback_gettime(clock, ts); return vdso_fallback_gettime(clock, ts);
} }
......
...@@ -59,9 +59,8 @@ static struct irqaction timer_irqaction = { ...@@ -59,9 +59,8 @@ static struct irqaction timer_irqaction = {
void __init time_init(void) void __init time_init(void)
{ {
xtime.tv_nsec = 0; /* FIXME: xtime&wall_to_monotonic are set in timekeeping_init. */
xtime.tv_sec = read_persistent_clock(); read_persistent_clock(&xtime);
set_normalized_timespec(&wall_to_monotonic, set_normalized_timespec(&wall_to_monotonic,
-xtime.tv_sec, -xtime.tv_nsec); -xtime.tv_sec, -xtime.tv_nsec);
......
...@@ -14,6 +14,7 @@ ...@@ -14,6 +14,7 @@
#include <linux/list.h> #include <linux/list.h>
#include <linux/cache.h> #include <linux/cache.h>
#include <linux/timer.h> #include <linux/timer.h>
#include <linux/init.h>
#include <asm/div64.h> #include <asm/div64.h>
#include <asm/io.h> #include <asm/io.h>
...@@ -148,14 +149,11 @@ extern u64 timecounter_cyc2time(struct timecounter *tc, ...@@ -148,14 +149,11 @@ extern u64 timecounter_cyc2time(struct timecounter *tc,
* @disable: optional function to disable the clocksource * @disable: optional function to disable the clocksource
* @mask: bitmask for two's complement * @mask: bitmask for two's complement
* subtraction of non 64 bit counters * subtraction of non 64 bit counters
* @mult: cycle to nanosecond multiplier (adjusted by NTP) * @mult: cycle to nanosecond multiplier
* @mult_orig: cycle to nanosecond multiplier (unadjusted by NTP)
* @shift: cycle to nanosecond divisor (power of two) * @shift: cycle to nanosecond divisor (power of two)
* @flags: flags describing special properties * @flags: flags describing special properties
* @vread: vsyscall based read * @vread: vsyscall based read
* @resume: resume function for the clocksource, if necessary * @resume: resume function for the clocksource, if necessary
* @cycle_interval: Used internally by timekeeping core, please ignore.
* @xtime_interval: Used internally by timekeeping core, please ignore.
*/ */
struct clocksource { struct clocksource {
/* /*
...@@ -169,7 +167,6 @@ struct clocksource { ...@@ -169,7 +167,6 @@ struct clocksource {
void (*disable)(struct clocksource *cs); void (*disable)(struct clocksource *cs);
cycle_t mask; cycle_t mask;
u32 mult; u32 mult;
u32 mult_orig;
u32 shift; u32 shift;
unsigned long flags; unsigned long flags;
cycle_t (*vread)(void); cycle_t (*vread)(void);
...@@ -181,19 +178,12 @@ struct clocksource { ...@@ -181,19 +178,12 @@ struct clocksource {
#define CLKSRC_FSYS_MMIO_SET(mmio, addr) do { } while (0) #define CLKSRC_FSYS_MMIO_SET(mmio, addr) do { } while (0)
#endif #endif
/* timekeeping specific data, ignore */
cycle_t cycle_interval;
u64 xtime_interval;
u32 raw_interval;
/* /*
* Second part is written at each timer interrupt * Second part is written at each timer interrupt
* Keep it in a different cache line to dirty no * Keep it in a different cache line to dirty no
* more than one cache line. * more than one cache line.
*/ */
cycle_t cycle_last ____cacheline_aligned_in_smp; cycle_t cycle_last ____cacheline_aligned_in_smp;
u64 xtime_nsec;
s64 error;
struct timespec raw_time;
#ifdef CONFIG_CLOCKSOURCE_WATCHDOG #ifdef CONFIG_CLOCKSOURCE_WATCHDOG
/* Watchdog related data, used by the framework */ /* Watchdog related data, used by the framework */
...@@ -202,8 +192,6 @@ struct clocksource { ...@@ -202,8 +192,6 @@ struct clocksource {
#endif #endif
}; };
extern struct clocksource *clock; /* current clocksource */
/* /*
* Clock source flags bits:: * Clock source flags bits::
*/ */
...@@ -212,6 +200,7 @@ extern struct clocksource *clock; /* current clocksource */ ...@@ -212,6 +200,7 @@ extern struct clocksource *clock; /* current clocksource */
#define CLOCK_SOURCE_WATCHDOG 0x10 #define CLOCK_SOURCE_WATCHDOG 0x10
#define CLOCK_SOURCE_VALID_FOR_HRES 0x20 #define CLOCK_SOURCE_VALID_FOR_HRES 0x20
#define CLOCK_SOURCE_UNSTABLE 0x40
/* simplify initialization of mask field */ /* simplify initialization of mask field */
#define CLOCKSOURCE_MASK(bits) (cycle_t)((bits) < 64 ? ((1ULL<<(bits))-1) : -1) #define CLOCKSOURCE_MASK(bits) (cycle_t)((bits) < 64 ? ((1ULL<<(bits))-1) : -1)
...@@ -268,108 +257,15 @@ static inline u32 clocksource_hz2mult(u32 hz, u32 shift_constant) ...@@ -268,108 +257,15 @@ static inline u32 clocksource_hz2mult(u32 hz, u32 shift_constant)
} }
/** /**
* clocksource_read: - Access the clocksource's current cycle value * clocksource_cyc2ns - converts clocksource cycles to nanoseconds
* @cs: pointer to clocksource being read
*
* Uses the clocksource to return the current cycle_t value
*/
static inline cycle_t clocksource_read(struct clocksource *cs)
{
return cs->read(cs);
}
/**
* clocksource_enable: - enable clocksource
* @cs: pointer to clocksource
*
* Enables the specified clocksource. The clocksource callback
* function should start up the hardware and setup mult and field
* members of struct clocksource to reflect hardware capabilities.
*/
static inline int clocksource_enable(struct clocksource *cs)
{
int ret = 0;
if (cs->enable)
ret = cs->enable(cs);
/*
* The frequency may have changed while the clocksource
* was disabled. If so the code in ->enable() must update
* the mult value to reflect the new frequency. Make sure
* mult_orig follows this change.
*/
cs->mult_orig = cs->mult;
return ret;
}
/**
* clocksource_disable: - disable clocksource
* @cs: pointer to clocksource
*
* Disables the specified clocksource. The clocksource callback
* function should power down the now unused hardware block to
* save power.
*/
static inline void clocksource_disable(struct clocksource *cs)
{
/*
* Save mult_orig in mult so clocksource_enable() can
* restore the value regardless if ->enable() updates
* the value of mult or not.
*/
cs->mult = cs->mult_orig;
if (cs->disable)
cs->disable(cs);
}
/**
* cyc2ns - converts clocksource cycles to nanoseconds
* @cs: Pointer to clocksource
* @cycles: Cycles
* *
* Uses the clocksource and ntp ajdustment to convert cycle_ts to nanoseconds. * Converts cycles to nanoseconds, using the given mult and shift.
* *
* XXX - This could use some mult_lxl_ll() asm optimization * XXX - This could use some mult_lxl_ll() asm optimization
*/ */
static inline s64 cyc2ns(struct clocksource *cs, cycle_t cycles) static inline s64 clocksource_cyc2ns(cycle_t cycles, u32 mult, u32 shift)
{
u64 ret = (u64)cycles;
ret = (ret * cs->mult) >> cs->shift;
return ret;
}
/**
* clocksource_calculate_interval - Calculates a clocksource interval struct
*
* @c: Pointer to clocksource.
* @length_nsec: Desired interval length in nanoseconds.
*
* Calculates a fixed cycle/nsec interval for a given clocksource/adjustment
* pair and interval request.
*
* Unless you're the timekeeping code, you should not be using this!
*/
static inline void clocksource_calculate_interval(struct clocksource *c,
unsigned long length_nsec)
{ {
u64 tmp; return ((u64) cycles * mult) >> shift;
/* Do the ns -> cycle conversion first, using original mult */
tmp = length_nsec;
tmp <<= c->shift;
tmp += c->mult_orig/2;
do_div(tmp, c->mult_orig);
c->cycle_interval = (cycle_t)tmp;
if (c->cycle_interval == 0)
c->cycle_interval = 1;
/* Go back from cycles -> shifted ns, this time use ntp adjused mult */
c->xtime_interval = (u64)c->cycle_interval * c->mult;
c->raw_interval = ((u64)c->cycle_interval * c->mult_orig) >> c->shift;
} }
...@@ -380,6 +276,8 @@ extern void clocksource_touch_watchdog(void); ...@@ -380,6 +276,8 @@ extern void clocksource_touch_watchdog(void);
extern struct clocksource* clocksource_get_next(void); extern struct clocksource* clocksource_get_next(void);
extern void clocksource_change_rating(struct clocksource *cs, int rating); extern void clocksource_change_rating(struct clocksource *cs, int rating);
extern void clocksource_resume(void); extern void clocksource_resume(void);
extern struct clocksource * __init __weak clocksource_default_clock(void);
extern void clocksource_mark_unstable(struct clocksource *cs);
#ifdef CONFIG_GENERIC_TIME_VSYSCALL #ifdef CONFIG_GENERIC_TIME_VSYSCALL
extern void update_vsyscall(struct timespec *ts, struct clocksource *c); extern void update_vsyscall(struct timespec *ts, struct clocksource *c);
...@@ -394,4 +292,6 @@ static inline void update_vsyscall_tz(void) ...@@ -394,4 +292,6 @@ static inline void update_vsyscall_tz(void)
} }
#endif #endif
extern void timekeeping_notify(struct clocksource *clock);
#endif /* _LINUX_CLOCKSOURCE_H */ #endif /* _LINUX_CLOCKSOURCE_H */
...@@ -91,7 +91,6 @@ enum hrtimer_restart { ...@@ -91,7 +91,6 @@ enum hrtimer_restart {
* @function: timer expiry callback function * @function: timer expiry callback function
* @base: pointer to the timer base (per cpu and per clock) * @base: pointer to the timer base (per cpu and per clock)
* @state: state information (See bit values above) * @state: state information (See bit values above)
* @cb_entry: list head to enqueue an expired timer into the callback list
* @start_site: timer statistics field to store the site where the timer * @start_site: timer statistics field to store the site where the timer
* was started * was started
* @start_comm: timer statistics field to store the name of the process which * @start_comm: timer statistics field to store the name of the process which
...@@ -108,7 +107,6 @@ struct hrtimer { ...@@ -108,7 +107,6 @@ struct hrtimer {
enum hrtimer_restart (*function)(struct hrtimer *); enum hrtimer_restart (*function)(struct hrtimer *);
struct hrtimer_clock_base *base; struct hrtimer_clock_base *base;
unsigned long state; unsigned long state;
struct list_head cb_entry;
#ifdef CONFIG_TIMER_STATS #ifdef CONFIG_TIMER_STATS
int start_pid; int start_pid;
void *start_site; void *start_site;
......
...@@ -75,7 +75,7 @@ extern unsigned long mktime(const unsigned int year, const unsigned int mon, ...@@ -75,7 +75,7 @@ extern unsigned long mktime(const unsigned int year, const unsigned int mon,
const unsigned int day, const unsigned int hour, const unsigned int day, const unsigned int hour,
const unsigned int min, const unsigned int sec); const unsigned int min, const unsigned int sec);
extern void set_normalized_timespec(struct timespec *ts, time_t sec, long nsec); extern void set_normalized_timespec(struct timespec *ts, time_t sec, s64 nsec);
extern struct timespec timespec_add_safe(const struct timespec lhs, extern struct timespec timespec_add_safe(const struct timespec lhs,
const struct timespec rhs); const struct timespec rhs);
...@@ -101,7 +101,8 @@ extern struct timespec xtime; ...@@ -101,7 +101,8 @@ extern struct timespec xtime;
extern struct timespec wall_to_monotonic; extern struct timespec wall_to_monotonic;
extern seqlock_t xtime_lock; extern seqlock_t xtime_lock;
extern unsigned long read_persistent_clock(void); extern void read_persistent_clock(struct timespec *ts);
extern void read_boot_clock(struct timespec *ts);
extern int update_persistent_clock(struct timespec now); extern int update_persistent_clock(struct timespec now);
extern int no_sync_cmos_clock __read_mostly; extern int no_sync_cmos_clock __read_mostly;
void timekeeping_init(void); void timekeeping_init(void);
...@@ -109,6 +110,8 @@ extern int timekeeping_suspended; ...@@ -109,6 +110,8 @@ extern int timekeeping_suspended;
unsigned long get_seconds(void); unsigned long get_seconds(void);
struct timespec current_kernel_time(void); struct timespec current_kernel_time(void);
struct timespec __current_kernel_time(void); /* does not hold xtime_lock */
struct timespec get_monotonic_coarse(void);
#define CURRENT_TIME (current_kernel_time()) #define CURRENT_TIME (current_kernel_time())
#define CURRENT_TIME_SEC ((struct timespec) { get_seconds(), 0 }) #define CURRENT_TIME_SEC ((struct timespec) { get_seconds(), 0 })
...@@ -147,6 +150,7 @@ extern struct timespec timespec_trunc(struct timespec t, unsigned gran); ...@@ -147,6 +150,7 @@ extern struct timespec timespec_trunc(struct timespec t, unsigned gran);
extern int timekeeping_valid_for_hres(void); extern int timekeeping_valid_for_hres(void);
extern void update_wall_time(void); extern void update_wall_time(void);
extern void update_xtime_cache(u64 nsec); extern void update_xtime_cache(u64 nsec);
extern void timekeeping_leap_insert(int leapsecond);
struct tms; struct tms;
extern void do_sys_times(struct tms *); extern void do_sys_times(struct tms *);
...@@ -241,6 +245,8 @@ struct itimerval { ...@@ -241,6 +245,8 @@ struct itimerval {
#define CLOCK_PROCESS_CPUTIME_ID 2 #define CLOCK_PROCESS_CPUTIME_ID 2
#define CLOCK_THREAD_CPUTIME_ID 3 #define CLOCK_THREAD_CPUTIME_ID 3
#define CLOCK_MONOTONIC_RAW 4 #define CLOCK_MONOTONIC_RAW 4
#define CLOCK_REALTIME_COARSE 5
#define CLOCK_MONOTONIC_COARSE 6
/* /*
* The IDs of various hardware clocks: * The IDs of various hardware clocks:
......
...@@ -173,11 +173,6 @@ extern int mod_timer_pinned(struct timer_list *timer, unsigned long expires); ...@@ -173,11 +173,6 @@ extern int mod_timer_pinned(struct timer_list *timer, unsigned long expires);
*/ */
#define NEXT_TIMER_MAX_DELTA ((1UL << 30) - 1) #define NEXT_TIMER_MAX_DELTA ((1UL << 30) - 1)
/*
* Return when the next timer-wheel timeout occurs (in absolute jiffies),
* locks the timer base:
*/
extern unsigned long next_timer_interrupt(void);
/* /*
* Return when the next timer-wheel timeout occurs (in absolute jiffies), * Return when the next timer-wheel timeout occurs (in absolute jiffies),
* locks the timer base and does the comparison against the given * locks the timer base and does the comparison against the given
......
...@@ -48,37 +48,6 @@ ...@@ -48,37 +48,6 @@
#include <asm/uaccess.h> #include <asm/uaccess.h>
/**
* ktime_get - get the monotonic time in ktime_t format
*
* returns the time in ktime_t format
*/
ktime_t ktime_get(void)
{
struct timespec now;
ktime_get_ts(&now);
return timespec_to_ktime(now);
}
EXPORT_SYMBOL_GPL(ktime_get);
/**
* ktime_get_real - get the real (wall-) time in ktime_t format
*
* returns the time in ktime_t format
*/
ktime_t ktime_get_real(void)
{
struct timespec now;
getnstimeofday(&now);
return timespec_to_ktime(now);
}
EXPORT_SYMBOL_GPL(ktime_get_real);
/* /*
* The timer bases: * The timer bases:
* *
...@@ -106,31 +75,6 @@ DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) = ...@@ -106,31 +75,6 @@ DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) =
} }
}; };
/**
* ktime_get_ts - get the monotonic clock in timespec format
* @ts: pointer to timespec variable
*
* The function calculates the monotonic clock from the realtime
* clock and the wall_to_monotonic offset and stores the result
* in normalized timespec format in the variable pointed to by @ts.
*/
void ktime_get_ts(struct timespec *ts)
{
struct timespec tomono;
unsigned long seq;
do {
seq = read_seqbegin(&xtime_lock);
getnstimeofday(ts);
tomono = wall_to_monotonic;
} while (read_seqretry(&xtime_lock, seq));
set_normalized_timespec(ts, ts->tv_sec + tomono.tv_sec,
ts->tv_nsec + tomono.tv_nsec);
}
EXPORT_SYMBOL_GPL(ktime_get_ts);
/* /*
* Get the coarse grained time at the softirq based on xtime and * Get the coarse grained time at the softirq based on xtime and
* wall_to_monotonic. * wall_to_monotonic.
...@@ -1155,7 +1099,6 @@ static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id, ...@@ -1155,7 +1099,6 @@ static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
clock_id = CLOCK_MONOTONIC; clock_id = CLOCK_MONOTONIC;
timer->base = &cpu_base->clock_base[clock_id]; timer->base = &cpu_base->clock_base[clock_id];
INIT_LIST_HEAD(&timer->cb_entry);
hrtimer_init_timer_hres(timer); hrtimer_init_timer_hres(timer);
#ifdef CONFIG_TIMER_STATS #ifdef CONFIG_TIMER_STATS
......
...@@ -242,6 +242,25 @@ static int posix_get_monotonic_raw(clockid_t which_clock, struct timespec *tp) ...@@ -242,6 +242,25 @@ static int posix_get_monotonic_raw(clockid_t which_clock, struct timespec *tp)
return 0; return 0;
} }
static int posix_get_realtime_coarse(clockid_t which_clock, struct timespec *tp)
{
*tp = current_kernel_time();
return 0;
}
static int posix_get_monotonic_coarse(clockid_t which_clock,
struct timespec *tp)
{
*tp = get_monotonic_coarse();
return 0;
}
int posix_get_coarse_res(const clockid_t which_clock, struct timespec *tp)
{
*tp = ktime_to_timespec(KTIME_LOW_RES);
return 0;
}
/* /*
* Initialize everything, well, just everything in Posix clocks/timers ;) * Initialize everything, well, just everything in Posix clocks/timers ;)
*/ */
...@@ -262,10 +281,26 @@ static __init int init_posix_timers(void) ...@@ -262,10 +281,26 @@ static __init int init_posix_timers(void)
.timer_create = no_timer_create, .timer_create = no_timer_create,
.nsleep = no_nsleep, .nsleep = no_nsleep,
}; };
struct k_clock clock_realtime_coarse = {
.clock_getres = posix_get_coarse_res,
.clock_get = posix_get_realtime_coarse,
.clock_set = do_posix_clock_nosettime,
.timer_create = no_timer_create,
.nsleep = no_nsleep,
};
struct k_clock clock_monotonic_coarse = {
.clock_getres = posix_get_coarse_res,
.clock_get = posix_get_monotonic_coarse,
.clock_set = do_posix_clock_nosettime,
.timer_create = no_timer_create,
.nsleep = no_nsleep,
};
register_posix_clock(CLOCK_REALTIME, &clock_realtime); register_posix_clock(CLOCK_REALTIME, &clock_realtime);
register_posix_clock(CLOCK_MONOTONIC, &clock_monotonic); register_posix_clock(CLOCK_MONOTONIC, &clock_monotonic);
register_posix_clock(CLOCK_MONOTONIC_RAW, &clock_monotonic_raw); register_posix_clock(CLOCK_MONOTONIC_RAW, &clock_monotonic_raw);
register_posix_clock(CLOCK_REALTIME_COARSE, &clock_realtime_coarse);
register_posix_clock(CLOCK_MONOTONIC_COARSE, &clock_monotonic_coarse);
posix_timers_cache = kmem_cache_create("posix_timers_cache", posix_timers_cache = kmem_cache_create("posix_timers_cache",
sizeof (struct k_itimer), 0, SLAB_PANIC, sizeof (struct k_itimer), 0, SLAB_PANIC,
......
...@@ -370,13 +370,20 @@ EXPORT_SYMBOL(mktime); ...@@ -370,13 +370,20 @@ EXPORT_SYMBOL(mktime);
* 0 <= tv_nsec < NSEC_PER_SEC * 0 <= tv_nsec < NSEC_PER_SEC
* For negative values only the tv_sec field is negative ! * For negative values only the tv_sec field is negative !
*/ */
void set_normalized_timespec(struct timespec *ts, time_t sec, long nsec) void set_normalized_timespec(struct timespec *ts, time_t sec, s64 nsec)
{ {
while (nsec >= NSEC_PER_SEC) { while (nsec >= NSEC_PER_SEC) {
/*
* The following asm() prevents the compiler from
* optimising this loop into a modulo operation. See
* also __iter_div_u64_rem() in include/linux/time.h
*/
asm("" : "+rm"(nsec));
nsec -= NSEC_PER_SEC; nsec -= NSEC_PER_SEC;
++sec; ++sec;
} }
while (nsec < 0) { while (nsec < 0) {
asm("" : "+rm"(nsec));
nsec += NSEC_PER_SEC; nsec += NSEC_PER_SEC;
--sec; --sec;
} }
......
This diff is collapsed.
...@@ -61,7 +61,6 @@ struct clocksource clocksource_jiffies = { ...@@ -61,7 +61,6 @@ struct clocksource clocksource_jiffies = {
.read = jiffies_read, .read = jiffies_read,
.mask = 0xffffffff, /*32bits*/ .mask = 0xffffffff, /*32bits*/
.mult = NSEC_PER_JIFFY << JIFFIES_SHIFT, /* details above */ .mult = NSEC_PER_JIFFY << JIFFIES_SHIFT, /* details above */
.mult_orig = NSEC_PER_JIFFY << JIFFIES_SHIFT,
.shift = JIFFIES_SHIFT, .shift = JIFFIES_SHIFT,
}; };
...@@ -71,3 +70,8 @@ static int __init init_jiffies_clocksource(void) ...@@ -71,3 +70,8 @@ static int __init init_jiffies_clocksource(void)
} }
core_initcall(init_jiffies_clocksource); core_initcall(init_jiffies_clocksource);
struct clocksource * __init __weak clocksource_default_clock(void)
{
return &clocksource_jiffies;
}
...@@ -194,8 +194,7 @@ static enum hrtimer_restart ntp_leap_second(struct hrtimer *timer) ...@@ -194,8 +194,7 @@ static enum hrtimer_restart ntp_leap_second(struct hrtimer *timer)
case TIME_OK: case TIME_OK:
break; break;
case TIME_INS: case TIME_INS:
xtime.tv_sec--; timekeeping_leap_insert(-1);
wall_to_monotonic.tv_sec++;
time_state = TIME_OOP; time_state = TIME_OOP;
printk(KERN_NOTICE printk(KERN_NOTICE
"Clock: inserting leap second 23:59:60 UTC\n"); "Clock: inserting leap second 23:59:60 UTC\n");
...@@ -203,9 +202,8 @@ static enum hrtimer_restart ntp_leap_second(struct hrtimer *timer) ...@@ -203,9 +202,8 @@ static enum hrtimer_restart ntp_leap_second(struct hrtimer *timer)
res = HRTIMER_RESTART; res = HRTIMER_RESTART;
break; break;
case TIME_DEL: case TIME_DEL:
xtime.tv_sec++; timekeeping_leap_insert(1);
time_tai--; time_tai--;
wall_to_monotonic.tv_sec--;
time_state = TIME_WAIT; time_state = TIME_WAIT;
printk(KERN_NOTICE printk(KERN_NOTICE
"Clock: deleting leap second 23:59:59 UTC\n"); "Clock: deleting leap second 23:59:59 UTC\n");
...@@ -219,7 +217,6 @@ static enum hrtimer_restart ntp_leap_second(struct hrtimer *timer) ...@@ -219,7 +217,6 @@ static enum hrtimer_restart ntp_leap_second(struct hrtimer *timer)
time_state = TIME_OK; time_state = TIME_OK;
break; break;
} }
update_vsyscall(&xtime, clock);
write_sequnlock(&xtime_lock); write_sequnlock(&xtime_lock);
......
This diff is collapsed.
...@@ -72,6 +72,7 @@ struct tvec_base { ...@@ -72,6 +72,7 @@ struct tvec_base {
spinlock_t lock; spinlock_t lock;
struct timer_list *running_timer; struct timer_list *running_timer;
unsigned long timer_jiffies; unsigned long timer_jiffies;
unsigned long next_timer;
struct tvec_root tv1; struct tvec_root tv1;
struct tvec tv2; struct tvec tv2;
struct tvec tv3; struct tvec tv3;
...@@ -622,6 +623,9 @@ __mod_timer(struct timer_list *timer, unsigned long expires, ...@@ -622,6 +623,9 @@ __mod_timer(struct timer_list *timer, unsigned long expires,
if (timer_pending(timer)) { if (timer_pending(timer)) {
detach_timer(timer, 0); detach_timer(timer, 0);
if (timer->expires == base->next_timer &&
!tbase_get_deferrable(timer->base))
base->next_timer = base->timer_jiffies;
ret = 1; ret = 1;
} else { } else {
if (pending_only) if (pending_only)
...@@ -663,6 +667,9 @@ __mod_timer(struct timer_list *timer, unsigned long expires, ...@@ -663,6 +667,9 @@ __mod_timer(struct timer_list *timer, unsigned long expires,
} }
timer->expires = expires; timer->expires = expires;
if (time_before(timer->expires, base->next_timer) &&
!tbase_get_deferrable(timer->base))
base->next_timer = timer->expires;
internal_add_timer(base, timer); internal_add_timer(base, timer);
out_unlock: out_unlock:
...@@ -781,6 +788,9 @@ void add_timer_on(struct timer_list *timer, int cpu) ...@@ -781,6 +788,9 @@ void add_timer_on(struct timer_list *timer, int cpu)
spin_lock_irqsave(&base->lock, flags); spin_lock_irqsave(&base->lock, flags);
timer_set_base(timer, base); timer_set_base(timer, base);
debug_timer_activate(timer); debug_timer_activate(timer);
if (time_before(timer->expires, base->next_timer) &&
!tbase_get_deferrable(timer->base))
base->next_timer = timer->expires;
internal_add_timer(base, timer); internal_add_timer(base, timer);
/* /*
* Check whether the other CPU is idle and needs to be * Check whether the other CPU is idle and needs to be
...@@ -817,6 +827,9 @@ int del_timer(struct timer_list *timer) ...@@ -817,6 +827,9 @@ int del_timer(struct timer_list *timer)
base = lock_timer_base(timer, &flags); base = lock_timer_base(timer, &flags);
if (timer_pending(timer)) { if (timer_pending(timer)) {
detach_timer(timer, 1); detach_timer(timer, 1);
if (timer->expires == base->next_timer &&
!tbase_get_deferrable(timer->base))
base->next_timer = base->timer_jiffies;
ret = 1; ret = 1;
} }
spin_unlock_irqrestore(&base->lock, flags); spin_unlock_irqrestore(&base->lock, flags);
...@@ -850,6 +863,9 @@ int try_to_del_timer_sync(struct timer_list *timer) ...@@ -850,6 +863,9 @@ int try_to_del_timer_sync(struct timer_list *timer)
ret = 0; ret = 0;
if (timer_pending(timer)) { if (timer_pending(timer)) {
detach_timer(timer, 1); detach_timer(timer, 1);
if (timer->expires == base->next_timer &&
!tbase_get_deferrable(timer->base))
base->next_timer = base->timer_jiffies;
ret = 1; ret = 1;
} }
out: out:
...@@ -1007,8 +1023,8 @@ static inline void __run_timers(struct tvec_base *base) ...@@ -1007,8 +1023,8 @@ static inline void __run_timers(struct tvec_base *base)
#ifdef CONFIG_NO_HZ #ifdef CONFIG_NO_HZ
/* /*
* Find out when the next timer event is due to happen. This * Find out when the next timer event is due to happen. This
* is used on S/390 to stop all activity when a cpus is idle. * is used on S/390 to stop all activity when a CPU is idle.
* This functions needs to be called disabled. * This function needs to be called with interrupts disabled.
*/ */
static unsigned long __next_timer_interrupt(struct tvec_base *base) static unsigned long __next_timer_interrupt(struct tvec_base *base)
{ {
...@@ -1134,7 +1150,9 @@ unsigned long get_next_timer_interrupt(unsigned long now) ...@@ -1134,7 +1150,9 @@ unsigned long get_next_timer_interrupt(unsigned long now)
unsigned long expires; unsigned long expires;
spin_lock(&base->lock); spin_lock(&base->lock);
expires = __next_timer_interrupt(base); if (time_before_eq(base->next_timer, base->timer_jiffies))
base->next_timer = __next_timer_interrupt(base);
expires = base->next_timer;
spin_unlock(&base->lock); spin_unlock(&base->lock);
if (time_before_eq(expires, now)) if (time_before_eq(expires, now))
...@@ -1522,6 +1540,7 @@ static int __cpuinit init_timers_cpu(int cpu) ...@@ -1522,6 +1540,7 @@ static int __cpuinit init_timers_cpu(int cpu)
INIT_LIST_HEAD(base->tv1.vec + j); INIT_LIST_HEAD(base->tv1.vec + j);
base->timer_jiffies = jiffies; base->timer_jiffies = jiffies;
base->next_timer = base->timer_jiffies;
return 0; return 0;
} }
...@@ -1534,6 +1553,9 @@ static void migrate_timer_list(struct tvec_base *new_base, struct list_head *hea ...@@ -1534,6 +1553,9 @@ static void migrate_timer_list(struct tvec_base *new_base, struct list_head *hea
timer = list_first_entry(head, struct timer_list, entry); timer = list_first_entry(head, struct timer_list, entry);
detach_timer(timer, 0); detach_timer(timer, 0);
timer_set_base(timer, new_base); timer_set_base(timer, new_base);
if (time_before(timer->expires, new_base->next_timer) &&
!tbase_get_deferrable(timer->base))
new_base->next_timer = timer->expires;
internal_add_timer(new_base, timer); internal_add_timer(new_base, timer);
} }
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment