Commit 7fd56474 authored by Linus Torvalds

Merge branch 'timers-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull timer updates from Ingo Molnar:
 "The main changes in this cycle were:

   - clockevents state machine cleanups and enhancements (Viresh Kumar)

   - clockevents broadcast notifier horror to state machine conversion
     and related cleanups (Thomas Gleixner, Rafael J Wysocki)

   - clocksource and timekeeping core updates (John Stultz)

   - clocksource driver updates and fixes (Ben Dooks, Dmitry Osipenko,
     Hans de Goede, Laurent Pinchart, Maxime Ripard, Xunlei Pang)

   - y2038 fixes (Xunlei Pang, John Stultz)

   - NMI-safe ktime_get_raw_fast() and general refactoring of the clock
     code, in preparation for perf's per event clock ID support (Peter
     Zijlstra)

   - generic sched/clock fixes, optimizations and cleanups (Daniel
     Thompson)

   - clockevents cpu_down() race fix (Preeti U Murthy)"

* 'timers-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (94 commits)
  timers/PM: Drop unnecessary braces from tick_freeze()
  timers/PM: Fix up tick_unfreeze()
  timekeeping: Get rid of stale comment
  clockevents: Cleanup dead cpu explicitely
  clockevents: Make tick handover explicit
  clockevents: Remove broadcast oneshot control leftovers
  sched/idle: Use explicit broadcast oneshot control function
  ARM: Tegra: Use explicit broadcast oneshot control function
  ARM: OMAP: Use explicit broadcast oneshot control function
  intel_idle: Use explicit broadcast oneshot control function
  ACPI/idle: Use explicit broadcast control function
  ACPI/PAD: Use explicit broadcast oneshot control function
  x86/amd/idle, clockevents: Use explicit broadcast oneshot control functions
  clockevents: Provide explicit broadcast oneshot control functions
  clockevents: Remove the broadcast control leftovers
  ARM: OMAP: Use explicit broadcast control function
  intel_idle: Use explicit broadcast control function
  cpuidle: Use explicit broadcast control function
  ACPI/processor: Use explicit broadcast control function
  ACPI/PAD: Use explicit broadcast control function
  ...
parents 49d2953c def74708
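Most of the diff below is one mechanical conversion: the multiplexed
clockevents_notify() call, which took a reason code plus a pointer to the
CPU number, is replaced by dedicated tick broadcast functions that act on
the calling CPU. A minimal before/after sketch of the idle-path idiom as it
recurs in the drivers below (eax/ecx stand in for the mwait hints used by
intel_idle):

    /* Old style, removed throughout this merge: */
    int cpu = smp_processor_id();
    clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);
    mwait_idle_with_hints(eax, ecx);
    clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);

    /* New style: explicit, CPU-local, no reason codes: */
    tick_broadcast_enter();
    mwait_idle_with_hints(eax, ecx);
    tick_broadcast_exit();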
@@ -116,7 +116,7 @@ alpha_rtc_set_time(struct device *dev, struct rtc_time *tm)
 }
 
 static int
-alpha_rtc_set_mmss(struct device *dev, unsigned long nowtime)
+alpha_rtc_set_mmss(struct device *dev, time64_t nowtime)
 {
         int retval = 0;
         int real_seconds, real_minutes, cmos_minutes;
@@ -211,7 +211,7 @@ alpha_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
 static const struct rtc_class_ops alpha_rtc_ops = {
         .read_time = alpha_rtc_read_time,
         .set_time = alpha_rtc_set_time,
-        .set_mmss = alpha_rtc_set_mmss,
+        .set_mmss64 = alpha_rtc_set_mmss,
         .ioctl = alpha_rtc_ioctl,
 };
@@ -276,7 +276,7 @@ do_remote_mmss(void *data)
 }
 
 static int
-remote_set_mmss(struct device *dev, unsigned long now)
+remote_set_mmss(struct device *dev, time64_t now)
 {
         union remote_data x;
 
         if (smp_processor_id() != boot_cpuid) {
@@ -290,7 +290,7 @@ remote_set_mmss(struct device *dev, unsigned long now)
 static const struct rtc_class_ops remote_rtc_ops = {
         .read_time = remote_read_time,
         .set_time = remote_set_time,
-        .set_mmss = remote_set_mmss,
+        .set_mmss64 = remote_set_mmss,
         .ioctl = alpha_rtc_ioctl,
 };
 #endif
...
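The unsigned long → time64_t change above is part of the y2038 work noted
in the merge message: set_mmss hands the RTC a raw seconds count, and on
32-bit machines unsigned long wraps in January 2038, while the set_mmss64
variant stays correct. A self-contained userspace illustration of the
wraparound (a sketch, not kernel code):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            /* Seconds at 2038-01-19 03:14:07 UTC, the signed 32-bit limit. */
            int64_t secs64 = 0x7fffffff;

            /* Truncating models a 32-bit time_t one second later
             * (implementation-defined, wraps to INT32_MIN on usual ABIs). */
            int32_t secs32 = (int32_t)(secs64 + 1);
            int64_t wide   = secs64 + 1;   /* models time64_t: no wrap */

            printf("32-bit: %d\n", secs32);
            printf("64-bit: %lld\n", (long long)wide);
            return 0;
    }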
@@ -151,8 +151,6 @@ static int bL_switch_to(unsigned int new_cluster_id)
         unsigned int mpidr, this_cpu, that_cpu;
         unsigned int ob_mpidr, ob_cpu, ob_cluster, ib_mpidr, ib_cpu, ib_cluster;
         struct completion inbound_alive;
-        struct tick_device *tdev;
-        enum clock_event_mode tdev_mode;
         long volatile *handshake_ptr;
         int ipi_nr, ret;
@@ -219,13 +217,7 @@ static int bL_switch_to(unsigned int new_cluster_id)
         /* redirect GIC's SGIs to our counterpart */
         gic_migrate_target(bL_gic_id[ib_cpu][ib_cluster]);
 
-        tdev = tick_get_device(this_cpu);
-        if (tdev && !cpumask_equal(tdev->evtdev->cpumask, cpumask_of(this_cpu)))
-                tdev = NULL;
-        if (tdev) {
-                tdev_mode = tdev->evtdev->mode;
-                clockevents_set_mode(tdev->evtdev, CLOCK_EVT_MODE_SHUTDOWN);
-        }
+        tick_suspend_local();
 
         ret = cpu_pm_enter();
@@ -251,11 +243,7 @@ static int bL_switch_to(unsigned int new_cluster_id)
 
         ret = cpu_pm_exit();
 
-        if (tdev) {
-                clockevents_set_mode(tdev->evtdev, tdev_mode);
-                clockevents_program_event(tdev->evtdev,
-                                          tdev->evtdev->next_event, 1);
-        }
+        tick_resume_local();
 
         trace_cpu_migrate_finish(ktime_get_real_ns(), ib_mpidr);
         local_fiq_enable();
...
@@ -12,8 +12,7 @@
 
 extern void timer_tick(void);
 
-struct timespec;
-typedef void (*clock_access_fn)(struct timespec *);
+typedef void (*clock_access_fn)(struct timespec64 *);
 extern int register_persistent_clock(clock_access_fn read_boot,
                                      clock_access_fn read_persistent);
...
@@ -76,7 +76,7 @@ void timer_tick(void)
 }
 #endif
 
-static void dummy_clock_access(struct timespec *ts)
+static void dummy_clock_access(struct timespec64 *ts)
 {
         ts->tv_sec = 0;
         ts->tv_nsec = 0;
@@ -85,12 +85,12 @@ static void dummy_clock_access(struct timespec *ts)
 static clock_access_fn __read_persistent_clock = dummy_clock_access;
 static clock_access_fn __read_boot_clock = dummy_clock_access;;
 
-void read_persistent_clock(struct timespec *ts)
+void read_persistent_clock64(struct timespec64 *ts)
 {
         __read_persistent_clock(ts);
 }
 
-void read_boot_clock(struct timespec *ts)
+void read_boot_clock64(struct timespec64 *ts)
 {
         __read_boot_clock(ts);
 }
...
@@ -14,7 +14,7 @@
 #include <linux/cpuidle.h>
 #include <linux/cpu_pm.h>
 #include <linux/export.h>
-#include <linux/clockchips.h>
+#include <linux/tick.h>
 
 #include <asm/cpuidle.h>
 #include <asm/proc-fns.h>
@@ -84,7 +84,6 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
 {
         struct idle_statedata *cx = state_ptr + index;
         u32 mpuss_can_lose_context = 0;
-        int cpu_id = smp_processor_id();
 
         /*
          * CPU0 has to wait and stay ON until CPU1 is OFF state.
@@ -112,7 +111,7 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
         mpuss_can_lose_context = (cx->mpu_state == PWRDM_POWER_RET) &&
                                  (cx->mpu_logic_state == PWRDM_POWER_OFF);
 
-        clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu_id);
+        tick_broadcast_enter();
 
         /*
          * Call idle CPU PM enter notifier chain so that
@@ -169,7 +168,7 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
         if (dev->cpu == 0 && mpuss_can_lose_context)
                 cpu_cluster_pm_exit();
 
-        clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu_id);
+        tick_broadcast_exit();
 
 fail:
         cpuidle_coupled_parallel_barrier(dev, &abort_barrier);
@@ -184,8 +183,7 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
  */
 static void omap_setup_broadcast_timer(void *arg)
 {
-        int cpu = smp_processor_id();
-        clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ON, &cpu);
+        tick_broadcast_enable();
 }
 
 static struct cpuidle_driver omap4_idle_driver = {
...
@@ -15,7 +15,7 @@
  */
 
 #include <asm/firmware.h>
-#include <linux/clockchips.h>
+#include <linux/tick.h>
 #include <linux/cpuidle.h>
 #include <linux/cpu_pm.h>
 #include <linux/kernel.h>
@@ -44,7 +44,7 @@ static int tegra114_idle_power_down(struct cpuidle_device *dev,
         tegra_set_cpu_in_lp2();
         cpu_pm_enter();
 
-        clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &dev->cpu);
+        tick_broadcast_enter();
 
         call_firmware_op(prepare_idle);
@@ -52,7 +52,7 @@ static int tegra114_idle_power_down(struct cpuidle_device *dev,
         if (call_firmware_op(do_idle, 0) == -ENOSYS)
                 cpu_suspend(0, tegra30_sleep_cpu_secondary_finish);
 
-        clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &dev->cpu);
+        tick_broadcast_exit();
 
         cpu_pm_exit();
         tegra_clear_cpu_in_lp2();
...
@@ -20,7 +20,7 @@
  */
 
 #include <linux/clk/tegra.h>
-#include <linux/clockchips.h>
+#include <linux/tick.h>
 #include <linux/cpuidle.h>
 #include <linux/cpu_pm.h>
 #include <linux/kernel.h>
@@ -136,11 +136,11 @@ static bool tegra20_cpu_cluster_power_down(struct cpuidle_device *dev,
         if (tegra20_reset_cpu_1() || !tegra_cpu_rail_off_ready())
                 return false;
 
-        clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &dev->cpu);
+        tick_broadcast_enter();
 
         tegra_idle_lp2_last();
 
-        clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &dev->cpu);
+        tick_broadcast_exit();
 
         if (cpu_online(1))
                 tegra20_wake_cpu1_from_reset();
@@ -153,13 +153,13 @@ static bool tegra20_idle_enter_lp2_cpu_1(struct cpuidle_device *dev,
                                          struct cpuidle_driver *drv,
                                          int index)
 {
-        clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &dev->cpu);
+        tick_broadcast_enter();
 
         cpu_suspend(0, tegra20_sleep_cpu_secondary_finish);
 
         tegra20_cpu_clear_resettable();
 
-        clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &dev->cpu);
+        tick_broadcast_exit();
 
         return true;
 }
...
@@ -20,7 +20,7 @@
  */
 
 #include <linux/clk/tegra.h>
-#include <linux/clockchips.h>
+#include <linux/tick.h>
 #include <linux/cpuidle.h>
 #include <linux/cpu_pm.h>
 #include <linux/kernel.h>
@@ -76,11 +76,11 @@ static bool tegra30_cpu_cluster_power_down(struct cpuidle_device *dev,
                 return false;
         }
 
-        clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &dev->cpu);
+        tick_broadcast_enter();
 
         tegra_idle_lp2_last();
 
-        clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &dev->cpu);
+        tick_broadcast_exit();
 
         return true;
 }
@@ -90,13 +90,13 @@ static bool tegra30_cpu_core_power_down(struct cpuidle_device *dev,
                                         struct cpuidle_driver *drv,
                                         int index)
 {
-        clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &dev->cpu);
+        tick_broadcast_enter();
 
         smp_wmb();
 
         cpu_suspend(0, tegra30_sleep_cpu_secondary_finish);
 
-        clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &dev->cpu);
+        tick_broadcast_exit();
 
         return true;
 }
...
@@ -44,24 +44,20 @@ static u64 notrace omap_32k_read_sched_clock(void)
 }
 
 /**
- * omap_read_persistent_clock - Return time from a persistent clock.
+ * omap_read_persistent_clock64 - Return time from a persistent clock.
  *
  * Reads the time from a source which isn't disabled during PM, the
  * 32k sync timer.  Convert the cycles elapsed since last read into
- * nsecs and adds to a monotonically increasing timespec.
+ * nsecs and adds to a monotonically increasing timespec64.
  */
-static struct timespec persistent_ts;
+static struct timespec64 persistent_ts;
 static cycles_t cycles;
 static unsigned int persistent_mult, persistent_shift;
-static DEFINE_SPINLOCK(read_persistent_clock_lock);
 
-static void omap_read_persistent_clock(struct timespec *ts)
+static void omap_read_persistent_clock64(struct timespec64 *ts)
 {
         unsigned long long nsecs;
         cycles_t last_cycles;
-        unsigned long flags;
-
-        spin_lock_irqsave(&read_persistent_clock_lock, flags);
 
         last_cycles = cycles;
         cycles = sync32k_cnt_reg ? readl_relaxed(sync32k_cnt_reg) : 0;
@@ -69,11 +65,9 @@ static void omap_read_persistent_clock(struct timespec *ts)
         nsecs = clocksource_cyc2ns(cycles - last_cycles,
                                    persistent_mult, persistent_shift);
 
-        timespec_add_ns(&persistent_ts, nsecs);
+        timespec64_add_ns(&persistent_ts, nsecs);
 
         *ts = persistent_ts;
-
-        spin_unlock_irqrestore(&read_persistent_clock_lock, flags);
 }
 
 /**
@@ -103,7 +97,7 @@ int __init omap_init_clocksource_32k(void __iomem *vbase)
         /*
          * 120000 rough estimate from the calculations in
-         * __clocksource_updatefreq_scale.
+         * __clocksource_update_freq_scale.
          */
         clocks_calc_mult_shift(&persistent_mult, &persistent_shift,
                                32768, NSEC_PER_SEC, 120000);
@@ -116,7 +110,7 @@ int __init omap_init_clocksource_32k(void __iomem *vbase)
         }
 
         sched_clock_register(omap_32k_read_sched_clock, 32, 32768);
-        register_persistent_clock(NULL, omap_read_persistent_clock);
+        register_persistent_clock(NULL, omap_read_persistent_clock64);
         pr_info("OMAP clocksource: 32k_counter at 32768 Hz\n");
 
         return 0;
...
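clocksource_cyc2ns() converts a cycle delta to nanoseconds as
(cycles * mult) >> shift; the mult/shift pair that clocks_calc_mult_shift()
computes above for the 32768 Hz counter can be sanity-checked in plain C.
A sketch with a hand-picked shift value (the kernel derives its own):

    #include <stdio.h>
    #include <stdint.h>

    #define NSEC_PER_SEC 1000000000ULL

    int main(void)
    {
            uint32_t freq  = 32768;        /* the 32k sync timer */
            uint32_t shift = 15;           /* chosen so mult fits in 32 bits */
            uint32_t mult  = (uint32_t)((NSEC_PER_SEC << shift) / freq);

            uint64_t cycles = 32768;       /* one second worth of ticks */
            /* the clocksource_cyc2ns() formula */
            uint64_t ns = (cycles * mult) >> shift;

            printf("mult=%u shift=%u -> %llu ns\n",
                   mult, shift, (unsigned long long)ns);  /* 1000000000 */
            return 0;
    }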
@@ -200,7 +200,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
 void update_vsyscall(struct timekeeper *tk)
 {
         struct timespec xtime_coarse;
-        u32 use_syscall = strcmp(tk->tkr.clock->name, "arch_sys_counter");
+        u32 use_syscall = strcmp(tk->tkr_mono.clock->name, "arch_sys_counter");
 
         ++vdso_data->tb_seq_count;
         smp_wmb();
@@ -213,11 +213,11 @@ void update_vsyscall(struct timekeeper *tk)
         vdso_data->wtm_clock_nsec = tk->wall_to_monotonic.tv_nsec;
 
         if (!use_syscall) {
-                vdso_data->cs_cycle_last = tk->tkr.cycle_last;
+                vdso_data->cs_cycle_last = tk->tkr_mono.cycle_last;
                 vdso_data->xtime_clock_sec = tk->xtime_sec;
-                vdso_data->xtime_clock_nsec = tk->tkr.xtime_nsec;
-                vdso_data->cs_mult = tk->tkr.mult;
-                vdso_data->cs_shift = tk->tkr.shift;
+                vdso_data->xtime_clock_nsec = tk->tkr_mono.xtime_nsec;
+                vdso_data->cs_mult = tk->tkr_mono.mult;
+                vdso_data->cs_shift = tk->tkr_mono.shift;
         }
 
         smp_wmb();
...
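The tb_seq_count increments bracketing the stores above implement a
seqlock: the vDSO reader retries whenever it sees an odd count or a count
that changed while it was copying. A simplified C11 model of the reader
side (field names mirror the diff; this is a sketch, not the actual
arm64 vDSO code, and the plain loads are tolerated races by design):

    #include <stdatomic.h>
    #include <stdint.h>

    struct vdso_data_sketch {
            _Atomic uint32_t tb_seq_count;  /* odd while an update is in flight */
            uint64_t cs_cycle_last;
            uint32_t cs_mult, cs_shift;
    };

    /* Retry until the count is even and unchanged across the copy. */
    static void read_snapshot(const struct vdso_data_sketch *d,
                              uint64_t *cycle_last, uint32_t *mult,
                              uint32_t *shift)
    {
            uint32_t seq;

            do {
                    seq = atomic_load_explicit(&d->tb_seq_count,
                                               memory_order_acquire);
                    *cycle_last = d->cs_cycle_last;
                    *mult = d->cs_mult;
                    *shift = d->cs_shift;
                    atomic_thread_fence(memory_order_acquire);
            } while ((seq & 1) ||
                     seq != atomic_load_explicit(&d->tb_seq_count,
                                                 memory_order_relaxed));
    }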
@@ -75,11 +75,11 @@ static int rtctmp;
 int proc_dolasatrtc(struct ctl_table *table, int write,
                     void *buffer, size_t *lenp, loff_t *ppos)
 {
-        struct timespec ts;
+        struct timespec64 ts;
         int r;
 
         if (!write) {
-                read_persistent_clock(&ts);
+                read_persistent_clock64(&ts);
                 rtctmp = ts.tv_sec;
                 /* check for time < 0 and set to 0 */
                 if (rtctmp < 0)
...
@@ -215,20 +215,20 @@ void update_vsyscall(struct timekeeper *tk)
 {
         u64 nsecps;
 
-        if (tk->tkr.clock != &clocksource_tod)
+        if (tk->tkr_mono.clock != &clocksource_tod)
                 return;
 
         /* Make userspace gettimeofday spin until we're done. */
         ++vdso_data->tb_update_count;
         smp_wmb();
-        vdso_data->xtime_tod_stamp = tk->tkr.cycle_last;
+        vdso_data->xtime_tod_stamp = tk->tkr_mono.cycle_last;
         vdso_data->xtime_clock_sec = tk->xtime_sec;
-        vdso_data->xtime_clock_nsec = tk->tkr.xtime_nsec;
+        vdso_data->xtime_clock_nsec = tk->tkr_mono.xtime_nsec;
         vdso_data->wtom_clock_sec =
                 tk->xtime_sec + tk->wall_to_monotonic.tv_sec;
-        vdso_data->wtom_clock_nsec = tk->tkr.xtime_nsec +
-                + ((u64) tk->wall_to_monotonic.tv_nsec << tk->tkr.shift);
-        nsecps = (u64) NSEC_PER_SEC << tk->tkr.shift;
+        vdso_data->wtom_clock_nsec = tk->tkr_mono.xtime_nsec +
+                + ((u64) tk->wall_to_monotonic.tv_nsec << tk->tkr_mono.shift);
+        nsecps = (u64) NSEC_PER_SEC << tk->tkr_mono.shift;
         while (vdso_data->wtom_clock_nsec >= nsecps) {
                 vdso_data->wtom_clock_nsec -= nsecps;
                 vdso_data->wtom_clock_sec++;
@@ -236,7 +236,7 @@ void update_vsyscall(struct timekeeper *tk)
         vdso_data->xtime_coarse_sec = tk->xtime_sec;
         vdso_data->xtime_coarse_nsec =
-                (long)(tk->tkr.xtime_nsec >> tk->tkr.shift);
+                (long)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift);
         vdso_data->wtom_coarse_sec =
                 vdso_data->xtime_coarse_sec + tk->wall_to_monotonic.tv_sec;
         vdso_data->wtom_coarse_nsec =
@@ -246,8 +246,8 @@ void update_vsyscall(struct timekeeper *tk)
                 vdso_data->wtom_coarse_sec++;
         }
 
-        vdso_data->tk_mult = tk->tkr.mult;
-        vdso_data->tk_shift = tk->tkr.shift;
+        vdso_data->tk_mult = tk->tkr_mono.mult;
+        vdso_data->tk_shift = tk->tkr_mono.shift;
         smp_wmb();
         ++vdso_data->tb_update_count;
 }
@@ -283,7 +283,7 @@ void __init time_init(void)
         if (register_external_irq(EXT_IRQ_TIMING_ALERT, timing_alert_interrupt))
                 panic("Couldn't request external interrupt 0x1406");
 
-        if (clocksource_register(&clocksource_tod) != 0)
+        if (__clocksource_register(&clocksource_tod) != 0)
                 panic("Could not register TOD clock source");
 
         /* Enable TOD clock interrupts on the boot cpu. */
...
@@ -181,17 +181,13 @@ static struct clocksource timer_cs = {
         .rating = 100,
         .read   = timer_cs_read,
         .mask   = CLOCKSOURCE_MASK(64),
-        .shift  = 2,
         .flags  = CLOCK_SOURCE_IS_CONTINUOUS,
 };
 
 static __init int setup_timer_cs(void)
 {
         timer_cs_enabled = 1;
-        timer_cs.mult = clocksource_hz2mult(sparc_config.clock_rate,
-                                            timer_cs.shift);
-
-        return clocksource_register(&timer_cs);
+        return clocksource_register_hz(&timer_cs, sparc_config.clock_rate);
 }
 
 #ifdef CONFIG_SMP
...
@@ -257,34 +257,34 @@ void update_vsyscall_tz(void)
 void update_vsyscall(struct timekeeper *tk)
 {
-        if (tk->tkr.clock != &cycle_counter_cs)
+        if (tk->tkr_mono.clock != &cycle_counter_cs)
                 return;
 
         write_seqcount_begin(&vdso_data->tb_seq);
 
-        vdso_data->cycle_last = tk->tkr.cycle_last;
-        vdso_data->mask = tk->tkr.mask;
-        vdso_data->mult = tk->tkr.mult;
-        vdso_data->shift = tk->tkr.shift;
+        vdso_data->cycle_last = tk->tkr_mono.cycle_last;
+        vdso_data->mask = tk->tkr_mono.mask;
+        vdso_data->mult = tk->tkr_mono.mult;
+        vdso_data->shift = tk->tkr_mono.shift;
 
         vdso_data->wall_time_sec = tk->xtime_sec;
-        vdso_data->wall_time_snsec = tk->tkr.xtime_nsec;
+        vdso_data->wall_time_snsec = tk->tkr_mono.xtime_nsec;
 
         vdso_data->monotonic_time_sec = tk->xtime_sec
                                         + tk->wall_to_monotonic.tv_sec;
-        vdso_data->monotonic_time_snsec = tk->tkr.xtime_nsec
+        vdso_data->monotonic_time_snsec = tk->tkr_mono.xtime_nsec
                                         + ((u64)tk->wall_to_monotonic.tv_nsec
-                                                << tk->tkr.shift);
+                                                << tk->tkr_mono.shift);
         while (vdso_data->monotonic_time_snsec >=
-                                (((u64)NSEC_PER_SEC) << tk->tkr.shift)) {
+                                (((u64)NSEC_PER_SEC) << tk->tkr_mono.shift)) {
                 vdso_data->monotonic_time_snsec -=
-                                ((u64)NSEC_PER_SEC) << tk->tkr.shift;
+                                ((u64)NSEC_PER_SEC) << tk->tkr_mono.shift;
                 vdso_data->monotonic_time_sec++;
         }
 
         vdso_data->wall_time_coarse_sec = tk->xtime_sec;
-        vdso_data->wall_time_coarse_nsec = (long)(tk->tkr.xtime_nsec >>
-                                                  tk->tkr.shift);
+        vdso_data->wall_time_coarse_nsec = (long)(tk->tkr_mono.xtime_nsec >>
+                                                  tk->tkr_mono.shift);
 
         vdso_data->monotonic_time_coarse_sec =
                 vdso_data->wall_time_coarse_sec + tk->wall_to_monotonic.tv_sec;
...
@@ -9,7 +9,7 @@
 #include <linux/sched.h>
 #include <linux/module.h>
 #include <linux/pm.h>
-#include <linux/clockchips.h>
+#include <linux/tick.h>
 #include <linux/random.h>
 #include <linux/user-return-notifier.h>
 #include <linux/dmi.h>
@@ -378,14 +378,11 @@ static void amd_e400_idle(void)
 
                 if (!cpumask_test_cpu(cpu, amd_e400_c1e_mask)) {
                         cpumask_set_cpu(cpu, amd_e400_c1e_mask);
-                        /*
-                         * Force broadcast so ACPI can not interfere.
-                         */
-                        clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_FORCE,
-                                           &cpu);
+                        /* Force broadcast so ACPI can not interfere. */
+                        tick_broadcast_force();
                         pr_info("Switch to broadcast mode on CPU%d\n", cpu);
                 }
-                clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);
+                tick_broadcast_enter();
 
                 default_idle();
 
@@ -394,7 +391,7 @@ static void amd_e400_idle(void)
                  * called with interrupts disabled.
                  */
                 local_irq_disable();
-                clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);
+                tick_broadcast_exit();
                 local_irq_enable();
         } else
                 default_idle();
...
@@ -31,30 +31,30 @@ void update_vsyscall(struct timekeeper *tk)
 
         gtod_write_begin(vdata);
 
         /* copy vsyscall data */
-        vdata->vclock_mode = tk->tkr.clock->archdata.vclock_mode;
-        vdata->cycle_last = tk->tkr.cycle_last;
-        vdata->mask = tk->tkr.mask;
-        vdata->mult = tk->tkr.mult;
-        vdata->shift = tk->tkr.shift;
+        vdata->vclock_mode = tk->tkr_mono.clock->archdata.vclock_mode;
+        vdata->cycle_last = tk->tkr_mono.cycle_last;
+        vdata->mask = tk->tkr_mono.mask;
+        vdata->mult = tk->tkr_mono.mult;
+        vdata->shift = tk->tkr_mono.shift;
 
         vdata->wall_time_sec = tk->xtime_sec;
-        vdata->wall_time_snsec = tk->tkr.xtime_nsec;
+        vdata->wall_time_snsec = tk->tkr_mono.xtime_nsec;
 
         vdata->monotonic_time_sec = tk->xtime_sec
                                         + tk->wall_to_monotonic.tv_sec;
-        vdata->monotonic_time_snsec = tk->tkr.xtime_nsec
+        vdata->monotonic_time_snsec = tk->tkr_mono.xtime_nsec
                                         + ((u64)tk->wall_to_monotonic.tv_nsec
-                                                << tk->tkr.shift);
+                                                << tk->tkr_mono.shift);
         while (vdata->monotonic_time_snsec >=
-                                (((u64)NSEC_PER_SEC) << tk->tkr.shift)) {
+                                (((u64)NSEC_PER_SEC) << tk->tkr_mono.shift)) {
                 vdata->monotonic_time_snsec -=
-                                ((u64)NSEC_PER_SEC) << tk->tkr.shift;
+                                ((u64)NSEC_PER_SEC) << tk->tkr_mono.shift;
                 vdata->monotonic_time_sec++;
         }
 
         vdata->wall_time_coarse_sec = tk->xtime_sec;
-        vdata->wall_time_coarse_nsec = (long)(tk->tkr.xtime_nsec >>
-                                              tk->tkr.shift);
+        vdata->wall_time_coarse_nsec = (long)(tk->tkr_mono.xtime_nsec >>
+                                              tk->tkr_mono.shift);
 
         vdata->monotonic_time_coarse_sec =
                 vdata->wall_time_coarse_sec + tk->wall_to_monotonic.tv_sec;
...
@@ -1081,19 +1081,19 @@ static void update_pvclock_gtod(struct timekeeper *tk)
         struct pvclock_gtod_data *vdata = &pvclock_gtod_data;
         u64 boot_ns;
 
-        boot_ns = ktime_to_ns(ktime_add(tk->tkr.base_mono, tk->offs_boot));
+        boot_ns = ktime_to_ns(ktime_add(tk->tkr_mono.base, tk->offs_boot));
 
         write_seqcount_begin(&vdata->seq);
 
         /* copy pvclock gtod data */
-        vdata->clock.vclock_mode = tk->tkr.clock->archdata.vclock_mode;
-        vdata->clock.cycle_last = tk->tkr.cycle_last;
-        vdata->clock.mask = tk->tkr.mask;
-        vdata->clock.mult = tk->tkr.mult;
-        vdata->clock.shift = tk->tkr.shift;
+        vdata->clock.vclock_mode = tk->tkr_mono.clock->archdata.vclock_mode;
+        vdata->clock.cycle_last = tk->tkr_mono.cycle_last;
+        vdata->clock.mask = tk->tkr_mono.mask;
+        vdata->clock.mult = tk->tkr_mono.mult;
+        vdata->clock.shift = tk->tkr_mono.shift;
 
         vdata->boot_ns = boot_ns;
-        vdata->nsec_base = tk->tkr.xtime_nsec;
+        vdata->nsec_base = tk->tkr_mono.xtime_nsec;
 
         write_seqcount_end(&vdata->seq);
 }
...
 #include <linux/types.h>
-#include <linux/clockchips.h>
+#include <linux/tick.h>
 
 #include <xen/interface/xen.h>
 #include <xen/grant_table.h>
@@ -81,17 +81,14 @@ void xen_arch_post_suspend(int cancelled)
 
 static void xen_vcpu_notify_restore(void *data)
 {
-        unsigned long reason = (unsigned long)data;
-
         /* Boot processor notified via generic timekeeping_resume() */
-        if ( smp_processor_id() == 0)
+        if (smp_processor_id() == 0)
                 return;
 
-        clockevents_notify(reason, NULL);
+        tick_resume_local();
 }
 
 void xen_arch_resume(void)
 {
-        on_each_cpu(xen_vcpu_notify_restore,
-                    (void *)CLOCK_EVT_NOTIFY_RESUME, 1);
+        on_each_cpu(xen_vcpu_notify_restore, NULL, 1);
 }
...
@@ -26,7 +26,7 @@
 #include <linux/kthread.h>
 #include <linux/freezer.h>
 #include <linux/cpu.h>
-#include <linux/clockchips.h>
+#include <linux/tick.h>
 #include <linux/slab.h>
 #include <linux/acpi.h>
 #include <asm/mwait.h>
@@ -41,8 +41,6 @@ static unsigned long power_saving_mwait_eax;
 
 static unsigned char tsc_detected_unstable;
 static unsigned char tsc_marked_unstable;
-static unsigned char lapic_detected_unstable;
-static unsigned char lapic_marked_unstable;
 
 static void power_saving_mwait_init(void)
 {
@@ -82,13 +80,10 @@ static void power_saving_mwait_init(void)
                  */
                 if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
                         tsc_detected_unstable = 1;
-                if (!boot_cpu_has(X86_FEATURE_ARAT))
-                        lapic_detected_unstable = 1;
                 break;
         default:
-                /* TSC & LAPIC could halt in idle */
+                /* TSC could halt in idle */
                 tsc_detected_unstable = 1;
-                lapic_detected_unstable = 1;
         }
 #endif
 }
@@ -155,7 +150,6 @@ static int power_saving_thread(void *data)
         sched_setscheduler(current, SCHED_RR, &param);
 
         while (!kthread_should_stop()) {
-                int cpu;
                 unsigned long expire_time;
 
                 try_to_freeze();
@@ -177,28 +171,15 @@ static int power_saving_thread(void *data)
                         mark_tsc_unstable("TSC halts in idle");
                         tsc_marked_unstable = 1;
                 }
-                if (lapic_detected_unstable && !lapic_marked_unstable) {
-                        int i;
-                        /* LAPIC could halt in idle, so notify users */
-                        for_each_online_cpu(i)
-                                clockevents_notify(
-                                        CLOCK_EVT_NOTIFY_BROADCAST_ON,
-                                        &i);
-                        lapic_marked_unstable = 1;
-                }
                 local_irq_disable();
-                cpu = smp_processor_id();
-                if (lapic_marked_unstable)
-                        clockevents_notify(
-                                CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);
+                tick_broadcast_enable();
+                tick_broadcast_enter();
                 stop_critical_timings();
 
                 mwait_idle_with_hints(power_saving_mwait_eax, 1);
 
                 start_critical_timings();
-                if (lapic_marked_unstable)
-                        clockevents_notify(
-                                CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);
+                tick_broadcast_exit();
                 local_irq_enable();
 
                 if (time_before(expire_time, jiffies)) {
...
@@ -32,7 +32,7 @@
 #include <linux/acpi.h>
 #include <linux/dmi.h>
 #include <linux/sched.h>       /* need_resched() */
-#include <linux/clockchips.h>
+#include <linux/tick.h>
 #include <linux/cpuidle.h>
 #include <linux/syscore_ops.h>
 #include <acpi/processor.h>
@@ -157,12 +157,11 @@ static void lapic_timer_check_state(int state, struct acpi_processor *pr,
 static void __lapic_timer_propagate_broadcast(void *arg)
 {
         struct acpi_processor *pr = (struct acpi_processor *) arg;
-        unsigned long reason;
 
-        reason = pr->power.timer_broadcast_on_state < INT_MAX ?
-                CLOCK_EVT_NOTIFY_BROADCAST_ON : CLOCK_EVT_NOTIFY_BROADCAST_OFF;
-
-        clockevents_notify(reason, &pr->id);
+        if (pr->power.timer_broadcast_on_state < INT_MAX)
+                tick_broadcast_enable();
+        else
+                tick_broadcast_disable();
 }
 
 static void lapic_timer_propagate_broadcast(struct acpi_processor *pr)
@@ -179,11 +178,10 @@ static void lapic_timer_state_broadcast(struct acpi_processor *pr,
         int state = cx - pr->power.states;
 
         if (state >= pr->power.timer_broadcast_on_state) {
-                unsigned long reason;
-
-                reason = broadcast ?  CLOCK_EVT_NOTIFY_BROADCAST_ENTER :
-                        CLOCK_EVT_NOTIFY_BROADCAST_EXIT;
-                clockevents_notify(reason, &pr->id);
+                if (broadcast)
+                        tick_broadcast_enter();
+                else
+                        tick_broadcast_exit();
         }
 }
...
@@ -661,17 +661,17 @@ static const struct of_device_id arch_timer_mem_of_match[] __initconst = {
 };
 
 static bool __init
-arch_timer_probed(int type, const struct of_device_id *matches)
+arch_timer_needs_probing(int type, const struct of_device_id *matches)
 {
         struct device_node *dn;
-        bool probed = true;
+        bool needs_probing = false;
 
         dn = of_find_matching_node(NULL, matches);
         if (dn && of_device_is_available(dn) && !(arch_timers_present & type))
-                probed = false;
+                needs_probing = true;
         of_node_put(dn);
 
-        return probed;
+        return needs_probing;
 }
 
 static void __init arch_timer_common_init(void)
@@ -680,9 +680,9 @@ static void __init arch_timer_common_init(void)
 
         /* Wait until both nodes are probed if we have two timers */
         if ((arch_timers_present & mask) != mask) {
-                if (!arch_timer_probed(ARCH_MEM_TIMER, arch_timer_mem_of_match))
+                if (arch_timer_needs_probing(ARCH_MEM_TIMER, arch_timer_mem_of_match))
                         return;
-                if (!arch_timer_probed(ARCH_CP15_TIMER, arch_timer_of_match))
+                if (arch_timer_needs_probing(ARCH_CP15_TIMER, arch_timer_of_match))
                         return;
         }
...
@@ -108,7 +108,7 @@ static void __init add_clocksource(struct device_node *source_timer)
 
 static u64 notrace read_sched_clock(void)
 {
-        return ~__raw_readl(sched_io_base);
+        return ~readl_relaxed(sched_io_base);
 }
 
 static const struct of_device_id sptimer_ids[] __initconst = {
...
@@ -210,7 +210,7 @@ static int em_sti_clocksource_enable(struct clocksource *cs)
 
         ret = em_sti_start(p, USER_CLOCKSOURCE);
         if (!ret)
-                __clocksource_updatefreq_hz(cs, p->rate);
+                __clocksource_update_freq_hz(cs, p->rate);
         return ret;
 }
...
@@ -641,7 +641,7 @@ static int sh_cmt_clocksource_enable(struct clocksource *cs)
 
         ret = sh_cmt_start(ch, FLAG_CLOCKSOURCE);
         if (!ret) {
-                __clocksource_updatefreq_hz(cs, ch->rate);
+                __clocksource_update_freq_hz(cs, ch->rate);
                 ch->cs_enabled = true;
         }
         return ret;
...
@@ -272,7 +272,7 @@ static int sh_tmu_clocksource_enable(struct clocksource *cs)
 
         ret = sh_tmu_enable(ch);
         if (!ret) {
-                __clocksource_updatefreq_hz(cs, ch->rate);
+                __clocksource_update_freq_hz(cs, ch->rate);
                 ch->cs_enabled = true;
         }
...
@@ -170,7 +170,15 @@ static void __init sun4i_timer_init(struct device_node *node)
                TIMER_CTL_CLK_SRC(TIMER_CTL_CLK_SRC_OSC24M),
                timer_base + TIMER_CTL_REG(1));
 
-        sched_clock_register(sun4i_timer_sched_read, 32, rate);
+        /*
+         * sched_clock_register does not have priorities, and on sun6i and
+         * later there is a better sched_clock registered by arm_arch_timer.c
+         */
+        if (of_machine_is_compatible("allwinner,sun4i-a10") ||
+            of_machine_is_compatible("allwinner,sun5i-a13") ||
+            of_machine_is_compatible("allwinner,sun5i-a10s"))
+                sched_clock_register(sun4i_timer_sched_read, 32, rate);
 
         clocksource_mmio_init(timer_base + TIMER_CNTVAL_REG(1), node->name,
                               rate, 350, 32, clocksource_mmio_readl_down);
...
@@ -51,15 +51,15 @@
 static void __iomem *timer_reg_base;
 static void __iomem *rtc_base;
 
-static struct timespec persistent_ts;
+static struct timespec64 persistent_ts;
 static u64 persistent_ms, last_persistent_ms;
 
 static struct delay_timer tegra_delay_timer;
 
 #define timer_writel(value, reg) \
-        __raw_writel(value, timer_reg_base + (reg))
+        writel_relaxed(value, timer_reg_base + (reg))
 #define timer_readl(reg) \
-        __raw_readl(timer_reg_base + (reg))
+        readl_relaxed(timer_reg_base + (reg))
 
 static int tegra_timer_set_next_event(unsigned long cycles,
                                       struct clock_event_device *evt)
@@ -120,26 +120,25 @@ static u64 tegra_rtc_read_ms(void)
 }
 
 /*
- * tegra_read_persistent_clock -  Return time from a persistent clock.
+ * tegra_read_persistent_clock64 -  Return time from a persistent clock.
  *
  * Reads the time from a source which isn't disabled during PM, the
  * 32k sync timer.  Convert the cycles elapsed since last read into
- * nsecs and adds to a monotonically increasing timespec.
+ * nsecs and adds to a monotonically increasing timespec64.
  * Care must be taken that this funciton is not called while the
  * tegra_rtc driver could be executing to avoid race conditions
  * on the RTC shadow register
 */
-static void tegra_read_persistent_clock(struct timespec *ts)
+static void tegra_read_persistent_clock64(struct timespec64 *ts)
 {
         u64 delta;
-        struct timespec *tsp = &persistent_ts;
 
         last_persistent_ms = persistent_ms;
         persistent_ms = tegra_rtc_read_ms();
         delta = persistent_ms - last_persistent_ms;
 
-        timespec_add_ns(tsp, delta * NSEC_PER_MSEC);
-        *ts = *tsp;
+        timespec64_add_ns(&persistent_ts, delta * NSEC_PER_MSEC);
+        *ts = persistent_ts;
 }
 
 static unsigned long tegra_delay_timer_read_counter_long(void)
@@ -252,7 +251,7 @@ static void __init tegra20_init_rtc(struct device_node *np)
         else
                 clk_prepare_enable(clk);
 
-        register_persistent_clock(NULL, tegra_read_persistent_clock);
+        register_persistent_clock(NULL, tegra_read_persistent_clock64);
 }
 CLOCKSOURCE_OF_DECLARE(tegra20_rtc, "nvidia,tegra20-rtc", tegra20_init_rtc);
...
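timespec64_add_ns(), used by both persistent-clock readers above, folds a
nanosecond delta into the accumulated timespec64 and carries whole seconds
out of tv_nsec. A standalone equivalent (an assumed simplification: the
kernel helper avoids a full 64-bit division and ignores none of the corner
cases shown here):

    #include <stdio.h>
    #include <stdint.h>

    #define NSEC_PER_SEC 1000000000ULL

    struct timespec64 { int64_t tv_sec; long tv_nsec; };

    static void timespec64_add_ns_demo(struct timespec64 *ts, uint64_t ns)
    {
            ns += ts->tv_nsec;
            ts->tv_sec += ns / NSEC_PER_SEC;   /* carry whole seconds */
            ts->tv_nsec = ns % NSEC_PER_SEC;
    }

    int main(void)
    {
            struct timespec64 ts = { 0, 900000000 };

            timespec64_add_ns_demo(&ts, 250000000);   /* +0.25s crosses a second */
            printf("%lld.%09ld\n", (long long)ts.tv_sec, ts.tv_nsec);  /* 1.150000000 */
            return 0;
    }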
@@ -111,7 +111,7 @@ static irqreturn_t efm32_clock_event_handler(int irq, void *dev_id)
 static struct efm32_clock_event_ddata clock_event_ddata = {
         .evtdev = {
                 .name = "efm32 clockevent",
-                .features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_MODE_PERIODIC,
+                .features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC,
                 .set_mode = efm32_clock_event_set_mode,
                 .set_next_event = efm32_clock_event_set_next_event,
                 .rating = 200,
...
@@ -61,12 +61,12 @@ static inline struct pit_data *clkevt_to_pit_data(struct clock_event_device *clk
 
 static inline unsigned int pit_read(void __iomem *base, unsigned int reg_offset)
 {
-        return __raw_readl(base + reg_offset);
+        return readl_relaxed(base + reg_offset);
 }
 
 static inline void pit_write(void __iomem *base, unsigned int reg_offset, unsigned long value)
 {
-        __raw_writel(value, base + reg_offset);
+        writel_relaxed(value, base + reg_offset);
 }
 
 /*
...
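The __raw_readl/__raw_writel → readl_relaxed/writel_relaxed switch in the
timer drivers above matters on big-endian kernels: the relaxed accessors
convert from the device's little-endian register layout to CPU byte order,
while still skipping the memory barrier that full readl()/writel() imply.
A rough userspace model of the distinction (hypothetical helpers, not the
kernel definitions):

    #include <stdint.h>

    /* raw access: exactly the bytes in the register, no swap, no barrier */
    static inline uint32_t raw_readl_model(const volatile uint32_t *addr)
    {
            return *addr;
    }

    /* relaxed access: fix up bus endianness, still no barrier */
    static inline uint32_t readl_relaxed_model(const volatile uint32_t *addr)
    {
            uint32_t v = raw_readl_model(addr);
    #if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
            v = __builtin_bswap32(v);   /* device registers are little-endian */
    #endif
            return v;
    }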
@@ -13,7 +13,7 @@
 #include <linux/sched.h>
 #include <linux/cpuidle.h>
 #include <linux/cpumask.h>
-#include <linux/clockchips.h>
+#include <linux/tick.h>
 
 #include "cpuidle.h"
@@ -130,21 +130,20 @@ static inline void __cpuidle_unset_driver(struct cpuidle_driver *drv)
 #endif
 
 /**
- * cpuidle_setup_broadcast_timer - enable/disable the broadcast timer
+ * cpuidle_setup_broadcast_timer - enable/disable the broadcast timer on a cpu
  * @arg: a void pointer used to match the SMP cross call API
  *
- * @arg is used as a value of type 'long' with one of the two values:
- * - CLOCK_EVT_NOTIFY_BROADCAST_ON
- * - CLOCK_EVT_NOTIFY_BROADCAST_OFF
+ * If @arg is NULL broadcast is disabled otherwise enabled
  *
- * Set the broadcast timer notification for the current CPU. This function
- * is executed per CPU by an SMP cross call. It not supposed to be called
- * directly.
+ * This function is executed per CPU by an SMP cross call. It's not
+ * supposed to be called directly.
  */
 static void cpuidle_setup_broadcast_timer(void *arg)
 {
-        int cpu = smp_processor_id();
-        clockevents_notify((long)(arg), &cpu);
+        if (arg)
+                tick_broadcast_enable();
+        else
+                tick_broadcast_disable();
 }
 
 /**
@@ -239,7 +238,7 @@ static int __cpuidle_register_driver(struct cpuidle_driver *drv)
         if (drv->bctimer)
                 on_each_cpu_mask(drv->cpumask, cpuidle_setup_broadcast_timer,
-                                 (void *)CLOCK_EVT_NOTIFY_BROADCAST_ON, 1);
+                                 (void *)1, 1);
 
         poll_idle_init(drv);
@@ -263,7 +262,7 @@ static void __cpuidle_unregister_driver(struct cpuidle_driver *drv)
         if (drv->bctimer) {
                 drv->bctimer = 0;
                 on_each_cpu_mask(drv->cpumask, cpuidle_setup_broadcast_timer,
-                                 (void *)CLOCK_EVT_NOTIFY_BROADCAST_OFF, 1);
+                                 NULL, 1);
         }
 
         __cpuidle_unset_driver(drv);
...
@@ -55,7 +55,7 @@
 
 #include <linux/kernel.h>
 #include <linux/cpuidle.h>
-#include <linux/clockchips.h>
+#include <linux/tick.h>
 #include <trace/events/power.h>
 #include <linux/sched.h>
 #include <linux/notifier.h>
@@ -638,12 +638,12 @@ static int intel_idle(struct cpuidle_device *dev,
                 leave_mm(cpu);
 
         if (!(lapic_timer_reliable_states & (1 << (cstate))))
-                clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);
+                tick_broadcast_enter();
 
         mwait_idle_with_hints(eax, ecx);
 
         if (!(lapic_timer_reliable_states & (1 << (cstate))))
-                clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);
+                tick_broadcast_exit();
 
         return index;
 }
@@ -665,13 +665,12 @@ static void intel_idle_freeze(struct cpuidle_device *dev,
 
 static void __setup_broadcast_timer(void *arg)
 {
-        unsigned long reason = (unsigned long)arg;
-        int cpu = smp_processor_id();
-
-        reason = reason ?
-                CLOCK_EVT_NOTIFY_BROADCAST_ON : CLOCK_EVT_NOTIFY_BROADCAST_OFF;
+        unsigned long on = (unsigned long)arg;
 
-        clockevents_notify(reason, &cpu);
+        if (on)
+                tick_broadcast_enable();
+        else
+                tick_broadcast_disable();
 }
 
 static int cpu_hotplug_notify(struct notifier_block *n,
...
@@ -55,7 +55,7 @@ static int rtc_suspend(struct device *dev)
         struct timespec64 delta, delta_delta;
         int err;
 
-        if (has_persistent_clock())
+        if (timekeeping_rtc_skipsuspend())
                 return 0;
 
         if (strcmp(dev_name(&rtc->dev), CONFIG_RTC_HCTOSYS_DEVICE) != 0)
@@ -102,7 +102,7 @@ static int rtc_resume(struct device *dev)
         struct timespec64 sleep_time;
         int err;
 
-        if (has_persistent_clock())
+        if (timekeeping_rtc_skipresume())
                 return 0;
 
         rtc_hctosys_ret = -ENODEV;
@@ -117,10 +117,6 @@ static int rtc_resume(struct device *dev)
                 return 0;
         }
 
-        if (rtc_valid_tm(&tm) != 0) {
-                pr_debug("%s: bogus resume time\n", dev_name(&rtc->dev));
-                return 0;
-        }
-
         new_rtc.tv_sec = rtc_tm_to_time64(&tm);
         new_rtc.tv_nsec = 0;
...
@@ -72,7 +72,11 @@ int rtc_set_time(struct rtc_device *rtc, struct rtc_time *tm)
                 err = -ENODEV;
         else if (rtc->ops->set_time)
                 err = rtc->ops->set_time(rtc->dev.parent, tm);
-        else if (rtc->ops->set_mmss) {
+        else if (rtc->ops->set_mmss64) {
+                time64_t secs64 = rtc_tm_to_time64(tm);
+
+                err = rtc->ops->set_mmss64(rtc->dev.parent, secs64);
+        } else if (rtc->ops->set_mmss) {
                 time64_t secs64 = rtc_tm_to_time64(tm);
                 err = rtc->ops->set_mmss(rtc->dev.parent, secs64);
         } else
@@ -96,6 +100,8 @@ int rtc_set_mmss(struct rtc_device *rtc, unsigned long secs)
 
         if (!rtc->ops)
                 err = -ENODEV;
+        else if (rtc->ops->set_mmss64)
+                err = rtc->ops->set_mmss64(rtc->dev.parent, secs);
         else if (rtc->ops->set_mmss)
                 err = rtc->ops->set_mmss(rtc->dev.parent, secs);
         else if (rtc->ops->read_time && rtc->ops->set_time) {
...
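The core now prefers the 64-bit callback whenever a driver provides one,
and falls back to the legacy 32-bit set_mmss otherwise, so drivers can be
converted one at a time. The callback preference, reduced to a sketch with
simplified signatures (the real set_time op takes a struct rtc_time *):

    #include <stdint.h>

    typedef int64_t time64_t;

    struct rtc_ops_sketch {
            int (*set_time)(time64_t secs);
            int (*set_mmss64)(time64_t secs);
            int (*set_mmss)(unsigned long secs);
    };

    /* Preference order as in rtc_set_time()/rtc_set_mmss() after this merge. */
    static int set_clock(const struct rtc_ops_sketch *ops, time64_t secs)
    {
            if (ops->set_time)
                    return ops->set_time(secs);
            if (ops->set_mmss64)
                    return ops->set_mmss64(secs);          /* y2038-safe path */
            if (ops->set_mmss)
                    return ops->set_mmss((unsigned long)secs); /* legacy 32-bit */
            return -1;                                     /* -ENODEV in the kernel */
    }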
@@ -43,21 +43,21 @@
 /*
  * RTC clock functions and device struct declaration
  */
-static int ab3100_rtc_set_mmss(struct device *dev, unsigned long secs)
+static int ab3100_rtc_set_mmss(struct device *dev, time64_t secs)
 {
         u8 regs[] = {AB3100_TI0, AB3100_TI1, AB3100_TI2,
                      AB3100_TI3, AB3100_TI4, AB3100_TI5};
         unsigned char buf[6];
-        u64 fat_time = (u64) secs * AB3100_RTC_CLOCK_RATE * 2;
+        u64 hw_counter = secs * AB3100_RTC_CLOCK_RATE * 2;
         int err = 0;
         int i;
 
-        buf[0] = (fat_time) & 0xFF;
-        buf[1] = (fat_time >> 8) & 0xFF;
-        buf[2] = (fat_time >> 16) & 0xFF;
-        buf[3] = (fat_time >> 24) & 0xFF;
-        buf[4] = (fat_time >> 32) & 0xFF;
-        buf[5] = (fat_time >> 40) & 0xFF;
+        buf[0] = (hw_counter) & 0xFF;
+        buf[1] = (hw_counter >> 8) & 0xFF;
+        buf[2] = (hw_counter >> 16) & 0xFF;
+        buf[3] = (hw_counter >> 24) & 0xFF;
+        buf[4] = (hw_counter >> 32) & 0xFF;
+        buf[5] = (hw_counter >> 40) & 0xFF;
 
         for (i = 0; i < 6; i++) {
                 err = abx500_set_register_interruptible(dev, 0,
@@ -75,7 +75,7 @@ static int ab3100_rtc_set_mmss(struct device *dev, unsigned long secs)
 
 static int ab3100_rtc_read_time(struct device *dev, struct rtc_time *tm)
 {
-        unsigned long time;
+        time64_t time;
         u8 rtcval;
         int err;
@@ -88,7 +88,7 @@ static int ab3100_rtc_read_time(struct device *dev, struct rtc_time *tm)
                 dev_info(dev, "clock not set (lost power)");
                 return -EINVAL;
         } else {
-                u64 fat_time;
+                u64 hw_counter;
                 u8 buf[6];
 
                 /* Read out time registers */
@@ -98,22 +98,21 @@ static int ab3100_rtc_read_time(struct device *dev, struct rtc_time *tm)
                 if (err != 0)
                         return err;
 
-                fat_time = ((u64) buf[5] << 40) | ((u64) buf[4] << 32) |
+                hw_counter = ((u64) buf[5] << 40) | ((u64) buf[4] << 32) |
                         ((u64) buf[3] << 24) | ((u64) buf[2] << 16) |
                         ((u64) buf[1] << 8) | (u64) buf[0];
-                time = (unsigned long) (fat_time /
-                                        (u64) (AB3100_RTC_CLOCK_RATE * 2));
+                time = hw_counter / (u64) (AB3100_RTC_CLOCK_RATE * 2);
         }
 
-        rtc_time_to_tm(time, tm);
+        rtc_time64_to_tm(time, tm);
 
         return rtc_valid_tm(tm);
 }
 
 static int ab3100_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
 {
-        unsigned long time;
-        u64 fat_time;
+        time64_t time;
+        u64 hw_counter;
         u8 buf[6];
         u8 rtcval;
         int err;
@@ -134,11 +133,11 @@ static int ab3100_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
                                             AB3100_AL0, buf, 4);
         if (err)
                 return err;
-        fat_time = ((u64) buf[3] << 40) | ((u64) buf[2] << 32) |
+        hw_counter = ((u64) buf[3] << 40) | ((u64) buf[2] << 32) |
                 ((u64) buf[1] << 24) | ((u64) buf[0] << 16);
-        time = (unsigned long) (fat_time / (u64) (AB3100_RTC_CLOCK_RATE * 2));
+        time = hw_counter / (u64) (AB3100_RTC_CLOCK_RATE * 2);
 
-        rtc_time_to_tm(time, &alarm->time);
+        rtc_time64_to_tm(time, &alarm->time);
 
         return rtc_valid_tm(&alarm->time);
 }
@@ -147,17 +146,17 @@ static int ab3100_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
 {
         u8 regs[] = {AB3100_AL0, AB3100_AL1, AB3100_AL2, AB3100_AL3};
         unsigned char buf[4];
-        unsigned long secs;
-        u64 fat_time;
+        time64_t secs;
+        u64 hw_counter;
         int err;
         int i;
 
-        rtc_tm_to_time(&alarm->time, &secs);
-        fat_time = (u64) secs * AB3100_RTC_CLOCK_RATE * 2;
-        buf[0] = (fat_time >> 16) & 0xFF;
-        buf[1] = (fat_time >> 24) & 0xFF;
-        buf[2] = (fat_time >> 32) & 0xFF;
-        buf[3] = (fat_time >> 40) & 0xFF;
+        secs = rtc_tm_to_time64(&alarm->time);
+        hw_counter = secs * AB3100_RTC_CLOCK_RATE * 2;
+        buf[0] = (hw_counter >> 16) & 0xFF;
+        buf[1] = (hw_counter >> 24) & 0xFF;
+        buf[2] = (hw_counter >> 32) & 0xFF;
+        buf[3] = (hw_counter >> 40) & 0xFF;
 
         /* Set the alarm */
         for (i = 0; i < 4; i++) {
@@ -193,7 +192,7 @@ static int ab3100_rtc_irq_enable(struct device *dev, unsigned int enabled)
 
 static const struct rtc_class_ops ab3100_rtc_ops = {
         .read_time = ab3100_rtc_read_time,
-        .set_mmss = ab3100_rtc_set_mmss,
+        .set_mmss64 = ab3100_rtc_set_mmss,
         .read_alarm = ab3100_rtc_read_alarm,
         .set_alarm = ab3100_rtc_set_alarm,
         .alarm_irq_enable = ab3100_rtc_irq_enable,
...
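The AB3100 keeps time as a 48-bit counter running at twice its RTC clock
rate: the driver multiplies seconds by AB3100_RTC_CLOCK_RATE * 2 and
scatters the result over six 8-bit registers, LSB first. A standalone
round-trip of that packing (the 32768 Hz rate is an assumption taken from
the 32k-clock context, not from this diff):

    #include <stdio.h>
    #include <stdint.h>

    #define RTC_CLOCK_RATE 32768ULL   /* assumed AB3100_RTC_CLOCK_RATE */

    int main(void)
    {
            int64_t secs = 1000000000;            /* some epoch seconds value */
            uint64_t hw_counter = (uint64_t)secs * RTC_CLOCK_RATE * 2;
            uint8_t buf[6];
            uint64_t back = 0;
            int i;

            for (i = 0; i < 6; i++)               /* pack: LSB first, 48 bits */
                    buf[i] = (hw_counter >> (8 * i)) & 0xFF;

            for (i = 5; i >= 0; i--)              /* unpack, as read_time does */
                    back = (back << 8) | buf[i];

            printf("%s\n", back / (RTC_CLOCK_RATE * 2) == (uint64_t)secs
                   ? "round-trip ok" : "mismatch");
            return 0;
    }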
@@ -83,20 +83,19 @@ static int mc13xxx_rtc_read_time(struct device *dev, struct rtc_time *tm)
 			return ret;
 	} while (days1 != days2);

-	rtc_time_to_tm(days1 * SEC_PER_DAY + seconds, tm);
+	rtc_time64_to_tm((time64_t)days1 * SEC_PER_DAY + seconds, tm);

 	return rtc_valid_tm(tm);
 }

-static int mc13xxx_rtc_set_mmss(struct device *dev, unsigned long secs)
+static int mc13xxx_rtc_set_mmss(struct device *dev, time64_t secs)
 {
 	struct mc13xxx_rtc *priv = dev_get_drvdata(dev);
 	unsigned int seconds, days;
 	unsigned int alarmseconds;
 	int ret;

-	seconds = secs % SEC_PER_DAY;
-	days = secs / SEC_PER_DAY;
+	days = div_s64_rem(secs, SEC_PER_DAY, &seconds);

 	mc13xxx_lock(priv->mc13xxx);
@@ -159,7 +158,7 @@ static int mc13xxx_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
 {
 	struct mc13xxx_rtc *priv = dev_get_drvdata(dev);
 	unsigned seconds, days;
-	unsigned long s1970;
+	time64_t s1970;
 	int enabled, pending;
 	int ret;
@@ -189,10 +188,10 @@ static int mc13xxx_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
 	alarm->enabled = enabled;
 	alarm->pending = pending;

-	s1970 = days * SEC_PER_DAY + seconds;
+	s1970 = (time64_t)days * SEC_PER_DAY + seconds;

-	rtc_time_to_tm(s1970, &alarm->time);
-	dev_dbg(dev, "%s: %lu\n", __func__, s1970);
+	rtc_time64_to_tm(s1970, &alarm->time);
+	dev_dbg(dev, "%s: %lld\n", __func__, (long long)s1970);

 	return 0;
 }
@@ -200,8 +199,8 @@ static int mc13xxx_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
 {
 	struct mc13xxx_rtc *priv = dev_get_drvdata(dev);
-	unsigned long s1970;
-	unsigned seconds, days;
+	time64_t s1970;
+	u32 seconds, days;
 	int ret;

 	mc13xxx_lock(priv->mc13xxx);
@@ -215,20 +214,17 @@ static int mc13xxx_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
 	if (unlikely(ret))
 		goto out;

-	ret = rtc_tm_to_time(&alarm->time, &s1970);
-	if (unlikely(ret))
-		goto out;
+	s1970 = rtc_tm_to_time64(&alarm->time);

-	dev_dbg(dev, "%s: o%2.s %lu\n", __func__, alarm->enabled ? "n" : "ff",
-		s1970);
+	dev_dbg(dev, "%s: o%2.s %lld\n", __func__, alarm->enabled ? "n" : "ff",
+		(long long)s1970);

 	ret = mc13xxx_rtc_irq_enable_unlocked(dev, alarm->enabled,
 					      MC13XXX_IRQ_TODA);
 	if (unlikely(ret))
 		goto out;

-	seconds = s1970 % SEC_PER_DAY;
-	days = s1970 / SEC_PER_DAY;
+	days = div_s64_rem(s1970, SEC_PER_DAY, &seconds);

 	ret = mc13xxx_reg_write(priv->mc13xxx, MC13XXX_RTCDAYA, days);
 	if (unlikely(ret))
@@ -268,7 +264,7 @@ static irqreturn_t mc13xxx_rtc_update_handler(int irq, void *dev)
 static const struct rtc_class_ops mc13xxx_rtc_ops = {
 	.read_time = mc13xxx_rtc_read_time,
-	.set_mmss = mc13xxx_rtc_set_mmss,
+	.set_mmss64 = mc13xxx_rtc_set_mmss,
 	.read_alarm = mc13xxx_rtc_read_alarm,
 	.set_alarm = mc13xxx_rtc_set_alarm,
 	.alarm_irq_enable = mc13xxx_rtc_alarm_irq_enable,
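Note that the mc13xxx hunks swap open-coded % and / on the seconds value for div_s64_rem(). That is not just style: once the operand is a 64-bit time64_t, plain division is no longer a single native instruction on 32-bit targets, so the kernel routes it through the explicit helper from <linux/math64.h>. A user-space sketch of what the helper computes, for illustration only:

	#include <stdint.h>

	#define SEC_PER_DAY 86400

	/* Same contract as the kernel's div_s64_rem(): quotient returned,
	 * remainder stored through the pointer. */
	static int64_t div_s64_rem_demo(int64_t dividend, int32_t divisor,
					int32_t *remainder)
	{
		*remainder = (int32_t)(dividend % divisor);
		return dividend / divisor;
	}

	/* e.g. secs = 86465 -> days = 1, seconds = 65 */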
......
@@ -106,7 +106,7 @@ static inline int is_imx1_rtc(struct rtc_plat_data *data)
  * This function is used to obtain the RTC time or the alarm value in
  * second.
  */
-static u32 get_alarm_or_time(struct device *dev, int time_alarm)
+static time64_t get_alarm_or_time(struct device *dev, int time_alarm)
 {
 	struct platform_device *pdev = to_platform_device(dev);
 	struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
@@ -129,29 +129,28 @@ static time64_t get_alarm_or_time(struct device *dev, int time_alarm)
 	hr = hr_min >> 8;
 	min = hr_min & 0xff;

-	return (((day * 24 + hr) * 60) + min) * 60 + sec;
+	return ((((time64_t)day * 24 + hr) * 60) + min) * 60 + sec;
 }

 /*
  * This function sets the RTC alarm value or the time value.
  */
-static void set_alarm_or_time(struct device *dev, int time_alarm, u32 time)
+static void set_alarm_or_time(struct device *dev, int time_alarm, time64_t time)
 {
-	u32 day, hr, min, sec, temp;
+	u32 tod, day, hr, min, sec, temp;
 	struct platform_device *pdev = to_platform_device(dev);
 	struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
 	void __iomem *ioaddr = pdata->ioaddr;

-	day = time / 86400;
-	time -= day * 86400;
+	day = div_s64_rem(time, 86400, &tod);

 	/* time is within a day now */
-	hr = time / 3600;
-	time -= hr * 3600;
+	hr = tod / 3600;
+	tod -= hr * 3600;

 	/* time is within an hour now */
-	min = time / 60;
-	sec = time - min * 60;
+	min = tod / 60;
+	sec = tod - min * 60;

 	temp = (hr << 8) + min;
@@ -173,29 +172,18 @@ static void set_alarm_or_time(struct device *dev, int time_alarm, time64_t time)
  * This function updates the RTC alarm registers and then clears all the
  * interrupt status bits.
  */
-static int rtc_update_alarm(struct device *dev, struct rtc_time *alrm)
+static void rtc_update_alarm(struct device *dev, struct rtc_time *alrm)
 {
-	struct rtc_time alarm_tm, now_tm;
-	unsigned long now, time;
+	time64_t time;
 	struct platform_device *pdev = to_platform_device(dev);
 	struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
 	void __iomem *ioaddr = pdata->ioaddr;

-	now = get_alarm_or_time(dev, MXC_RTC_TIME);
-	rtc_time_to_tm(now, &now_tm);
-	alarm_tm.tm_year = now_tm.tm_year;
-	alarm_tm.tm_mon = now_tm.tm_mon;
-	alarm_tm.tm_mday = now_tm.tm_mday;
-	alarm_tm.tm_hour = alrm->tm_hour;
-	alarm_tm.tm_min = alrm->tm_min;
-	alarm_tm.tm_sec = alrm->tm_sec;
-	rtc_tm_to_time(&alarm_tm, &time);
+	time = rtc_tm_to_time64(alrm);

 	/* clear all the interrupt status bits */
 	writew(readw(ioaddr + RTC_RTCISR), ioaddr + RTC_RTCISR);
 	set_alarm_or_time(dev, MXC_RTC_ALARM, time);
-
-	return 0;
 }

 static void mxc_rtc_irq_enable(struct device *dev, unsigned int bit,
@@ -283,14 +271,14 @@ static int mxc_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
  */
 static int mxc_rtc_read_time(struct device *dev, struct rtc_time *tm)
 {
-	u32 val;
+	time64_t val;

 	/* Avoid roll-over from reading the different registers */
 	do {
 		val = get_alarm_or_time(dev, MXC_RTC_TIME);
 	} while (val != get_alarm_or_time(dev, MXC_RTC_TIME));

-	rtc_time_to_tm(val, tm);
+	rtc_time64_to_tm(val, tm);

 	return 0;
 }
@@ -298,7 +286,7 @@ static int mxc_rtc_read_time(struct device *dev, struct rtc_time *tm)
 /*
  * This function sets the internal RTC time based on tm in Gregorian date.
  */
-static int mxc_rtc_set_mmss(struct device *dev, unsigned long time)
+static int mxc_rtc_set_mmss(struct device *dev, time64_t time)
 {
 	struct platform_device *pdev = to_platform_device(dev);
 	struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
@@ -309,9 +297,9 @@ static int mxc_rtc_set_mmss(struct device *dev, time64_t time)
 	if (is_imx1_rtc(pdata)) {
 		struct rtc_time tm;

-		rtc_time_to_tm(time, &tm);
+		rtc_time64_to_tm(time, &tm);
 		tm.tm_year = 70;
-		rtc_tm_to_time(&tm, &time);
+		time = rtc_tm_to_time64(&tm);
 	}

 	/* Avoid roll-over from reading the different registers */
@@ -333,7 +321,7 @@ static int mxc_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
 	struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
 	void __iomem *ioaddr = pdata->ioaddr;

-	rtc_time_to_tm(get_alarm_or_time(dev, MXC_RTC_ALARM), &alrm->time);
+	rtc_time64_to_tm(get_alarm_or_time(dev, MXC_RTC_ALARM), &alrm->time);
 	alrm->pending = ((readw(ioaddr + RTC_RTCISR) & RTC_ALM_BIT)) ? 1 : 0;

 	return 0;
@@ -346,11 +334,8 @@ static int mxc_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
 {
 	struct platform_device *pdev = to_platform_device(dev);
 	struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
-	int ret;

-	ret = rtc_update_alarm(dev, &alrm->time);
-	if (ret)
-		return ret;
+	rtc_update_alarm(dev, &alrm->time);

 	memcpy(&pdata->g_rtc_alarm, &alrm->time, sizeof(struct rtc_time));
 	mxc_rtc_irq_enable(dev, RTC_ALM_BIT, alrm->enabled);
@@ -362,7 +347,7 @@ static int mxc_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
 static struct rtc_class_ops mxc_rtc_ops = {
 	.release = mxc_rtc_release,
 	.read_time = mxc_rtc_read_time,
-	.set_mmss = mxc_rtc_set_mmss,
+	.set_mmss64 = mxc_rtc_set_mmss,
 	.read_alarm = mxc_rtc_read_alarm,
 	.set_alarm = mxc_rtc_set_alarm,
 	.alarm_irq_enable = mxc_rtc_alarm_irq_enable,
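For reference, a worked pass (illustrative numbers only) through the arithmetic set_alarm_or_time() performs after the conversion to time64_t:

	/*
	 * time = 90061 seconds:
	 *
	 *   day = div_s64_rem(90061, 86400, &tod) = 1   (tod = 3661)
	 *   hr  = 3661 / 3600                     = 1   (tod -> 61)
	 *   min = 61 / 60                         = 1
	 *   sec = 61 - 1 * 60                     = 1
	 *
	 * i.e. 1 day, 01:01:01, matching the (hr << 8) + min register layout.
	 */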
......
@@ -13,6 +13,10 @@
 #include <linux/rtc.h>
 #include <linux/platform_device.h>

+static int test_mmss64;
+module_param(test_mmss64, int, 0644);
+MODULE_PARM_DESC(test_mmss64, "Test struct rtc_class_ops.set_mmss64().");
+
 static struct platform_device *test0 = NULL, *test1 = NULL;

 static int test_rtc_read_alarm(struct device *dev,
@@ -30,7 +34,13 @@ static int test_rtc_set_alarm(struct device *dev,
 static int test_rtc_read_time(struct device *dev,
 	struct rtc_time *tm)
 {
-	rtc_time_to_tm(get_seconds(), tm);
+	rtc_time64_to_tm(ktime_get_real_seconds(), tm);
+	return 0;
+}
+
+static int test_rtc_set_mmss64(struct device *dev, time64_t secs)
+{
+	dev_info(dev, "%s, secs = %lld\n", __func__, (long long)secs);
 	return 0;
 }

@@ -55,7 +65,7 @@ static int test_rtc_alarm_irq_enable(struct device *dev, unsigned int enable)
 	return 0;
 }

-static const struct rtc_class_ops test_rtc_ops = {
+static struct rtc_class_ops test_rtc_ops = {
 	.proc = test_rtc_proc,
 	.read_time = test_rtc_read_time,
 	.read_alarm = test_rtc_read_alarm,
@@ -101,6 +111,11 @@ static int test_probe(struct platform_device *plat_dev)
 	int err;
 	struct rtc_device *rtc;

+	if (test_mmss64) {
+		test_rtc_ops.set_mmss64 = test_rtc_set_mmss64;
+		test_rtc_ops.set_mmss = NULL;
+	}
+
 	rtc = devm_rtc_device_register(&plat_dev->dev, "test",
 				&test_rtc_ops, THIS_MODULE);
 	if (IS_ERR(rtc)) {
......
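With the rtc-test hunks applied, the new callback can be exercised from user space by loading the module as `modprobe rtc-test test_mmss64=1`: probe() then installs test_rtc_set_mmss64() and clears .set_mmss, forcing the core through the 64-bit path. That run-time patching of the ops table is also why test_rtc_ops had to lose its const qualifier above.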
@@ -11,7 +11,7 @@
  * rtc_set_ntp_time - Save NTP synchronized time to the RTC
  * @now: Current time of day
  *
- * Replacement for the NTP platform function update_persistent_clock
+ * Replacement for the NTP platform function update_persistent_clock64
  * that stores time for later retrieval by rtc_hctosys.
  *
  * Returns 0 on successful RTC update, -ENODEV if a RTC update is not
@@ -35,7 +35,10 @@ int rtc_set_ntp_time(struct timespec64 now)
 	if (rtc) {
 		/* rtc_hctosys exclusively uses UTC, so we call set_time here,
 		 * not set_mmss. */
-		if (rtc->ops && (rtc->ops->set_time || rtc->ops->set_mmss))
+		if (rtc->ops &&
+		    (rtc->ops->set_time ||
+		     rtc->ops->set_mmss64 ||
+		     rtc->ops->set_mmss))
 			err = rtc_set_time(rtc, &tm);
 		rtc_class_close(rtc);
 	}
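The check above only gates whether rtc_set_time() is attempted; the actual callback selection happens inside the RTC core. A hedged sketch of the dispatch order it presumably follows once set_mmss64() exists (simplified, kernel context assumed, locking and range checks omitted):

	static int rtc_set_time_sketch(struct rtc_device *rtc, struct rtc_time *tm)
	{
		if (!rtc->ops)
			return -ENODEV;
		else if (rtc->ops->set_time)
			return rtc->ops->set_time(rtc->dev.parent, tm);
		else if (rtc->ops->set_mmss64)	/* preferred: full time64_t range */
			return rtc->ops->set_mmss64(rtc->dev.parent,
						    rtc_tm_to_time64(tm));
		else if (rtc->ops->set_mmss)	/* legacy: truncates on 32-bit */
			return rtc->ops->set_mmss(rtc->dev.parent,
						  rtc_tm_to_time64(tm));
		return -EINVAL;
	}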
......
@@ -8,64 +8,69 @@
 #ifndef _LINUX_CLOCKCHIPS_H
 #define _LINUX_CLOCKCHIPS_H

-/* Clock event notification values */
-enum clock_event_nofitiers {
-	CLOCK_EVT_NOTIFY_ADD,
-	CLOCK_EVT_NOTIFY_BROADCAST_ON,
-	CLOCK_EVT_NOTIFY_BROADCAST_OFF,
-	CLOCK_EVT_NOTIFY_BROADCAST_FORCE,
-	CLOCK_EVT_NOTIFY_BROADCAST_ENTER,
-	CLOCK_EVT_NOTIFY_BROADCAST_EXIT,
-	CLOCK_EVT_NOTIFY_SUSPEND,
-	CLOCK_EVT_NOTIFY_RESUME,
-	CLOCK_EVT_NOTIFY_CPU_DYING,
-	CLOCK_EVT_NOTIFY_CPU_DEAD,
-};
-
-#ifdef CONFIG_GENERIC_CLOCKEVENTS_BUILD
+#ifdef CONFIG_GENERIC_CLOCKEVENTS

-#include <linux/clocksource.h>
-#include <linux/cpumask.h>
-#include <linux/ktime.h>
-#include <linux/notifier.h>
+# include <linux/clocksource.h>
+# include <linux/cpumask.h>
+# include <linux/ktime.h>
+# include <linux/notifier.h>

 struct clock_event_device;
 struct module;

-/* Clock event mode commands */
+/* Clock event mode commands for legacy ->set_mode(): OBSOLETE */
 enum clock_event_mode {
-	CLOCK_EVT_MODE_UNUSED = 0,
+	CLOCK_EVT_MODE_UNUSED,
 	CLOCK_EVT_MODE_SHUTDOWN,
 	CLOCK_EVT_MODE_PERIODIC,
 	CLOCK_EVT_MODE_ONESHOT,
 	CLOCK_EVT_MODE_RESUME,
 };

+/*
+ * Possible states of a clock event device.
+ *
+ * DETACHED:	Device is not used by clockevents core. Initial state or can be
+ *		reached from SHUTDOWN.
+ * SHUTDOWN:	Device is powered-off. Can be reached from PERIODIC or ONESHOT.
+ * PERIODIC:	Device is programmed to generate events periodically. Can be
+ *		reached from DETACHED or SHUTDOWN.
+ * ONESHOT:	Device is programmed to generate event only once. Can be reached
+ *		from DETACHED or SHUTDOWN.
+ */
+enum clock_event_state {
+	CLOCK_EVT_STATE_DETACHED,
+	CLOCK_EVT_STATE_SHUTDOWN,
+	CLOCK_EVT_STATE_PERIODIC,
+	CLOCK_EVT_STATE_ONESHOT,
+};
+
 /*
  * Clock event features
  */
-#define CLOCK_EVT_FEAT_PERIODIC	0x000001
-#define CLOCK_EVT_FEAT_ONESHOT	0x000002
-#define CLOCK_EVT_FEAT_KTIME	0x000004
+# define CLOCK_EVT_FEAT_PERIODIC	0x000001
+# define CLOCK_EVT_FEAT_ONESHOT		0x000002
+# define CLOCK_EVT_FEAT_KTIME		0x000004

 /*
- * x86(64) specific misfeatures:
+ * x86(64) specific (mis)features:
  *
  * - Clockevent source stops in C3 State and needs broadcast support.
  * - Local APIC timer is used as a dummy device.
  */
-#define CLOCK_EVT_FEAT_C3STOP	0x000008
-#define CLOCK_EVT_FEAT_DUMMY	0x000010
+# define CLOCK_EVT_FEAT_C3STOP		0x000008
+# define CLOCK_EVT_FEAT_DUMMY		0x000010

 /*
  * Core shall set the interrupt affinity dynamically in broadcast mode
  */
-#define CLOCK_EVT_FEAT_DYNIRQ	0x000020
-#define CLOCK_EVT_FEAT_PERCPU	0x000040
+# define CLOCK_EVT_FEAT_DYNIRQ		0x000020
+# define CLOCK_EVT_FEAT_PERCPU		0x000040

 /*
  * Clockevent device is based on a hrtimer for broadcast
  */
-#define CLOCK_EVT_FEAT_HRTIMER	0x000080
+# define CLOCK_EVT_FEAT_HRTIMER		0x000080

 /**
  * struct clock_event_device - clock event device descriptor
@@ -78,10 +83,15 @@ enum clock_event_mode {
  * @min_delta_ns:	minimum delta value in ns
  * @mult:		nanosecond to cycles multiplier
  * @shift:		nanoseconds to cycles divisor (power of two)
- * @mode:		operating mode assigned by the management code
+ * @mode:		operating mode, relevant only to ->set_mode(), OBSOLETE
+ * @state:		current state of the device, assigned by the core code
  * @features:		features
  * @retries:		number of forced programming retries
- * @set_mode:		set mode function
+ * @set_mode:		legacy set mode function, only for modes <= CLOCK_EVT_MODE_RESUME.
+ * @set_state_periodic:	switch state to periodic, if !set_mode
+ * @set_state_oneshot:	switch state to oneshot, if !set_mode
+ * @set_state_shutdown:	switch state to shutdown, if !set_mode
+ * @tick_resume:	resume clkevt device, if !set_mode
  * @broadcast:		function to broadcast events
  * @min_delta_ticks:	minimum delta value in ticks stored for reconfiguration
  * @max_delta_ticks:	maximum delta value in ticks stored for reconfiguration
@@ -95,22 +105,31 @@ enum clock_event_mode {
  */
 struct clock_event_device {
 	void			(*event_handler)(struct clock_event_device *);
-	int			(*set_next_event)(unsigned long evt,
-						  struct clock_event_device *);
-	int			(*set_next_ktime)(ktime_t expires,
-						  struct clock_event_device *);
+	int			(*set_next_event)(unsigned long evt, struct clock_event_device *);
+	int			(*set_next_ktime)(ktime_t expires, struct clock_event_device *);
 	ktime_t			next_event;
 	u64			max_delta_ns;
 	u64			min_delta_ns;
 	u32			mult;
 	u32			shift;
 	enum clock_event_mode	mode;
+	enum clock_event_state	state;
 	unsigned int		features;
 	unsigned long		retries;

+	/*
+	 * State transition callback(s): Only one of the two groups should be
+	 * defined:
+	 * - set_mode(), only for modes <= CLOCK_EVT_MODE_RESUME.
+	 * - set_state_{shutdown|periodic|oneshot}(), tick_resume().
+	 */
+	void			(*set_mode)(enum clock_event_mode mode, struct clock_event_device *);
+	int			(*set_state_periodic)(struct clock_event_device *);
+	int			(*set_state_oneshot)(struct clock_event_device *);
+	int			(*set_state_shutdown)(struct clock_event_device *);
+	int			(*tick_resume)(struct clock_event_device *);
+
 	void			(*broadcast)(const struct cpumask *mask);
-	void			(*set_mode)(enum clock_event_mode mode,
-					    struct clock_event_device *);
 	void			(*suspend)(struct clock_event_device *);
 	void			(*resume)(struct clock_event_device *);
 	unsigned long		min_delta_ticks;
@@ -136,18 +155,18 @@ struct clock_event_device {
  *
  * factor = (clock_ticks << shift) / nanoseconds
  */
-static inline unsigned long div_sc(unsigned long ticks, unsigned long nsec,
-				   int shift)
+static inline unsigned long
+div_sc(unsigned long ticks, unsigned long nsec, int shift)
 {
-	uint64_t tmp = ((uint64_t)ticks) << shift;
+	u64 tmp = ((u64)ticks) << shift;

 	do_div(tmp, nsec);
+
 	return (unsigned long) tmp;
 }

 /* Clock event layer functions */
-extern u64 clockevent_delta2ns(unsigned long latch,
-			       struct clock_event_device *evt);
+extern u64 clockevent_delta2ns(unsigned long latch, struct clock_event_device *evt);
 extern void clockevents_register_device(struct clock_event_device *dev);
 extern int clockevents_unbind_device(struct clock_event_device *ced, int cpu);
@@ -158,57 +177,42 @@ extern void clockevents_config_and_register(struct clock_event_device *dev,

 extern int clockevents_update_freq(struct clock_event_device *ce, u32 freq);

-extern void clockevents_exchange_device(struct clock_event_device *old,
-					struct clock_event_device *new);
-extern void clockevents_set_mode(struct clock_event_device *dev,
-				 enum clock_event_mode mode);
-extern int clockevents_program_event(struct clock_event_device *dev,
-				     ktime_t expires, bool force);
-
-extern void clockevents_handle_noop(struct clock_event_device *dev);
-
 static inline void
 clockevents_calc_mult_shift(struct clock_event_device *ce, u32 freq, u32 minsec)
 {
-	return clocks_calc_mult_shift(&ce->mult, &ce->shift, NSEC_PER_SEC,
-				      freq, minsec);
+	return clocks_calc_mult_shift(&ce->mult, &ce->shift, NSEC_PER_SEC, freq, minsec);
 }

 extern void clockevents_suspend(void);
 extern void clockevents_resume(void);

-#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
-#ifdef CONFIG_ARCH_HAS_TICK_BROADCAST
+# ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
+# ifdef CONFIG_ARCH_HAS_TICK_BROADCAST
 extern void tick_broadcast(const struct cpumask *mask);
-#else
-#define tick_broadcast	NULL
-#endif
+# else
+# define tick_broadcast	NULL
+# endif
 extern int tick_receive_broadcast(void);
-#endif
+# endif

-#if defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) && defined(CONFIG_TICK_ONESHOT)
+# if defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) && defined(CONFIG_TICK_ONESHOT)
 extern void tick_setup_hrtimer_broadcast(void);
 extern int tick_check_broadcast_expired(void);
-#else
+# else
 static inline int tick_check_broadcast_expired(void) { return 0; }
-static inline void tick_setup_hrtimer_broadcast(void) {};
-#endif
+static inline void tick_setup_hrtimer_broadcast(void) { }
+# endif

-#ifdef CONFIG_GENERIC_CLOCKEVENTS
 extern int clockevents_notify(unsigned long reason, void *arg);
-#else
-static inline int clockevents_notify(unsigned long reason, void *arg) { return 0; }
-#endif

-#else /* CONFIG_GENERIC_CLOCKEVENTS_BUILD */
+#else /* !CONFIG_GENERIC_CLOCKEVENTS: */

-static inline void clockevents_suspend(void) {}
-static inline void clockevents_resume(void) {}
+static inline void clockevents_suspend(void) { }
+static inline void clockevents_resume(void) { }
 static inline int clockevents_notify(unsigned long reason, void *arg) { return 0; }
 static inline int tick_check_broadcast_expired(void) { return 0; }
-static inline void tick_setup_hrtimer_broadcast(void) {};
+static inline void tick_setup_hrtimer_broadcast(void) { }

-#endif
+#endif /* !CONFIG_GENERIC_CLOCKEVENTS */

-#endif
+#endif /* _LINUX_CLOCKCHIPS_H */
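The header now carries both the legacy ->set_mode() hook and the per-state callbacks side by side during the migration. A hedged sketch of what a driver converted to the new group looks like; the names below are invented for illustration, and the rule that only one group may be populated comes from the comment in the struct above:

	static int demo_shutdown(struct clock_event_device *evt)
	{
		/* stop the hardware timer here */
		return 0;
	}

	static int demo_set_periodic(struct clock_event_device *evt)
	{
		/* program the hardware for a periodic tick here */
		return 0;
	}

	static struct clock_event_device demo_clockevent = {
		.name			= "demo",
		.features		= CLOCK_EVT_FEAT_PERIODIC |
					  CLOCK_EVT_FEAT_ONESHOT,
		.set_state_shutdown	= demo_shutdown,
		.set_state_periodic	= demo_set_periodic,
		/* .set_mode stays NULL when the state callbacks are used */
	};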
@@ -56,6 +56,7 @@ struct module;
  * @shift:		cycle to nanosecond divisor (power of two)
  * @max_idle_ns:	max idle time permitted by the clocksource (nsecs)
  * @maxadj:		maximum adjustment value to mult (~11%)
+ * @max_cycles:		maximum safe cycle value which won't overflow on multiplication
  * @flags:		flags describing special properties
  * @archdata:		arch-specific data
  * @suspend:		suspend function for the clocksource, if necessary
@@ -76,7 +77,7 @@ struct clocksource {
 #ifdef CONFIG_ARCH_CLOCKSOURCE_DATA
 	struct arch_clocksource_data archdata;
 #endif
-
+	u64 max_cycles;
 	const char *name;
 	struct list_head list;
 	int rating;
@@ -178,7 +179,6 @@ static inline s64 clocksource_cyc2ns(cycle_t cycles, u32 mult, u32 shift)
 }

-extern int clocksource_register(struct clocksource*);
 extern int clocksource_unregister(struct clocksource*);
 extern void clocksource_touch_watchdog(void);
 extern struct clocksource* clocksource_get_next(void);
@@ -189,7 +189,7 @@ extern struct clocksource * __init clocksource_default_clock(void);
 extern void clocksource_mark_unstable(struct clocksource *cs);

 extern u64
-clocks_calc_max_nsecs(u32 mult, u32 shift, u32 maxadj, u64 mask);
+clocks_calc_max_nsecs(u32 mult, u32 shift, u32 maxadj, u64 mask, u64 *max_cycles);
 extern void
 clocks_calc_mult_shift(u32 *mult, u32 *shift, u32 from, u32 to, u32 minsec);

@@ -200,7 +200,16 @@ clocks_calc_mult_shift(u32 *mult, u32 *shift, u32 from, u32 to, u32 minsec);
 extern int
 __clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq);
 extern void
-__clocksource_updatefreq_scale(struct clocksource *cs, u32 scale, u32 freq);
+__clocksource_update_freq_scale(struct clocksource *cs, u32 scale, u32 freq);
+
+/*
+ * Don't call this unless you are a default clocksource
+ * (AKA: jiffies) and absolutely have to.
+ */
+static inline int __clocksource_register(struct clocksource *cs)
+{
+	return __clocksource_register_scale(cs, 1, 0);
+}

 static inline int clocksource_register_hz(struct clocksource *cs, u32 hz)
 {
@@ -212,14 +221,14 @@ static inline int clocksource_register_khz(struct clocksource *cs, u32 khz)
 	return __clocksource_register_scale(cs, 1000, khz);
 }

-static inline void __clocksource_updatefreq_hz(struct clocksource *cs, u32 hz)
+static inline void __clocksource_update_freq_hz(struct clocksource *cs, u32 hz)
 {
-	__clocksource_updatefreq_scale(cs, 1, hz);
+	__clocksource_update_freq_scale(cs, 1, hz);
 }

-static inline void __clocksource_updatefreq_khz(struct clocksource *cs, u32 khz)
+static inline void __clocksource_update_freq_khz(struct clocksource *cs, u32 khz)
 {
-	__clocksource_updatefreq_scale(cs, 1000, khz);
+	__clocksource_update_freq_scale(cs, 1000, khz);
 }
......
@@ -77,6 +77,7 @@ struct rtc_class_ops {
 	int (*read_alarm)(struct device *, struct rtc_wkalrm *);
 	int (*set_alarm)(struct device *, struct rtc_wkalrm *);
 	int (*proc)(struct device *, struct seq_file *);
+	int (*set_mmss64)(struct device *, time64_t secs);
 	int (*set_mmss)(struct device *, unsigned long secs);
 	int (*read_callback)(struct device *, int data);
 	int (*alarm_irq_enable)(struct device *, unsigned int enabled);
......
-/* linux/include/linux/tick.h
- *
- * This file contains the structure definitions for tick related functions
- *
+/*
+ * Tick related global functions
  */
 #ifndef _LINUX_TICK_H
 #define _LINUX_TICK_H
@@ -9,149 +7,99 @@
 #include <linux/clockchips.h>
 #include <linux/irqflags.h>
 #include <linux/percpu.h>
-#include <linux/hrtimer.h>
 #include <linux/context_tracking_state.h>
 #include <linux/cpumask.h>
 #include <linux/sched.h>

 #ifdef CONFIG_GENERIC_CLOCKEVENTS
-
-enum tick_device_mode {
-	TICKDEV_MODE_PERIODIC,
-	TICKDEV_MODE_ONESHOT,
-};
-
-struct tick_device {
-	struct clock_event_device *evtdev;
-	enum tick_device_mode mode;
-};
-
-enum tick_nohz_mode {
-	NOHZ_MODE_INACTIVE,
-	NOHZ_MODE_LOWRES,
-	NOHZ_MODE_HIGHRES,
-};
-
-/**
- * struct tick_sched - sched tick emulation and no idle tick control/stats
- * @sched_timer:	hrtimer to schedule the periodic tick in high
- *			resolution mode
- * @last_tick:		Store the last tick expiry time when the tick
- *			timer is modified for nohz sleeps. This is necessary
- *			to resume the tick timer operation in the timeline
- *			when the CPU returns from nohz sleep.
- * @tick_stopped:	Indicator that the idle tick has been stopped
- * @idle_jiffies:	jiffies at the entry to idle for idle time accounting
- * @idle_calls:		Total number of idle calls
- * @idle_sleeps:	Number of idle calls, where the sched tick was stopped
- * @idle_entrytime:	Time when the idle call was entered
- * @idle_waketime:	Time when the idle was interrupted
- * @idle_exittime:	Time when the idle state was left
- * @idle_sleeptime:	Sum of the time slept in idle with sched tick stopped
- * @iowait_sleeptime:	Sum of the time slept in idle with sched tick stopped, with IO outstanding
- * @sleep_length:	Duration of the current idle sleep
- * @do_timer_lst:	CPU was the last one doing do_timer before going idle
- */
-struct tick_sched {
-	struct hrtimer			sched_timer;
-	unsigned long			check_clocks;
-	enum tick_nohz_mode		nohz_mode;
-	ktime_t				last_tick;
-	int				inidle;
-	int				tick_stopped;
-	unsigned long			idle_jiffies;
-	unsigned long			idle_calls;
-	unsigned long			idle_sleeps;
-	int				idle_active;
-	ktime_t				idle_entrytime;
-	ktime_t				idle_waketime;
-	ktime_t				idle_exittime;
-	ktime_t				idle_sleeptime;
-	ktime_t				iowait_sleeptime;
-	ktime_t				sleep_length;
-	unsigned long			last_jiffies;
-	unsigned long			next_jiffies;
-	ktime_t				idle_expires;
-	int				do_timer_last;
-};
-
 extern void __init tick_init(void);
-extern int tick_is_oneshot_available(void);
-extern struct tick_device *tick_get_device(int cpu);
-
 extern void tick_freeze(void);
 extern void tick_unfreeze(void);
+/* Should be core only, but ARM BL switcher requires it */
+extern void tick_suspend_local(void);
+/* Should be core only, but XEN resume magic and ARM BL switcher require it */
+extern void tick_resume_local(void);
+extern void tick_handover_do_timer(void);
+extern void tick_cleanup_dead_cpu(int cpu);
+#else /* CONFIG_GENERIC_CLOCKEVENTS */
+static inline void tick_init(void) { }
+static inline void tick_freeze(void) { }
+static inline void tick_unfreeze(void) { }
+static inline void tick_suspend_local(void) { }
+static inline void tick_resume_local(void) { }
+static inline void tick_handover_do_timer(void) { }
+static inline void tick_cleanup_dead_cpu(int cpu) { }
+#endif /* !CONFIG_GENERIC_CLOCKEVENTS */

-# ifdef CONFIG_HIGH_RES_TIMERS
-extern int tick_init_highres(void);
-extern int tick_program_event(ktime_t expires, int force);
-extern void tick_setup_sched_timer(void);
-# endif
-
-# if defined CONFIG_NO_HZ_COMMON || defined CONFIG_HIGH_RES_TIMERS
-extern void tick_cancel_sched_timer(int cpu);
-# else
-static inline void tick_cancel_sched_timer(int cpu) { }
-# endif
-
-# ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
-extern struct tick_device *tick_get_broadcast_device(void);
-extern struct cpumask *tick_get_broadcast_mask(void);
-
-# ifdef CONFIG_TICK_ONESHOT
-extern struct cpumask *tick_get_broadcast_oneshot_mask(void);
-# endif
-
-# endif /* BROADCAST */
-
-# ifdef CONFIG_TICK_ONESHOT
-extern void tick_clock_notify(void);
-extern int tick_check_oneshot_change(int allow_nohz);
-extern struct tick_sched *tick_get_tick_sched(int cpu);
+#ifdef CONFIG_TICK_ONESHOT
 extern void tick_irq_enter(void);
-extern int tick_oneshot_mode_active(void);
 #  ifndef arch_needs_cpu
 #   define arch_needs_cpu() (0)
 #  endif
 # else
-static inline void tick_clock_notify(void) { }
-static inline int tick_check_oneshot_change(int allow_nohz) { return 0; }
 static inline void tick_irq_enter(void) { }
-static inline int tick_oneshot_mode_active(void) { return 0; }
-# endif
+#endif

-#else /* CONFIG_GENERIC_CLOCKEVENTS */
-static inline void tick_init(void) { }
-static inline void tick_freeze(void) { }
-static inline void tick_unfreeze(void) { }
-static inline void tick_cancel_sched_timer(int cpu) { }
-static inline void tick_clock_notify(void) { }
-static inline int tick_check_oneshot_change(int allow_nohz) { return 0; }
-static inline void tick_irq_enter(void) { }
-static inline int tick_oneshot_mode_active(void) { return 0; }
-#endif /* !CONFIG_GENERIC_CLOCKEVENTS */
+#if defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) && defined(CONFIG_TICK_ONESHOT)
+extern void hotplug_cpu__broadcast_tick_pull(int dead_cpu);
+#else
+static inline void hotplug_cpu__broadcast_tick_pull(int dead_cpu) { }
+#endif

-# ifdef CONFIG_NO_HZ_COMMON
-DECLARE_PER_CPU(struct tick_sched, tick_cpu_sched);
+enum tick_broadcast_mode {
+	TICK_BROADCAST_OFF,
+	TICK_BROADCAST_ON,
+	TICK_BROADCAST_FORCE,
+};
+
+enum tick_broadcast_state {
+	TICK_BROADCAST_EXIT,
+	TICK_BROADCAST_ENTER,
+};
+
+#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
+extern void tick_broadcast_control(enum tick_broadcast_mode mode);
+#else
+static inline void tick_broadcast_control(enum tick_broadcast_mode mode) { }
+#endif /* BROADCAST */
+
+#if defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) && defined(CONFIG_TICK_ONESHOT)
+extern int tick_broadcast_oneshot_control(enum tick_broadcast_state state);
+#else
+static inline int tick_broadcast_oneshot_control(enum tick_broadcast_state state) { return 0; }
+#endif

-static inline int tick_nohz_tick_stopped(void)
+static inline void tick_broadcast_enable(void)
+{
+	tick_broadcast_control(TICK_BROADCAST_ON);
+}
+static inline void tick_broadcast_disable(void)
+{
+	tick_broadcast_control(TICK_BROADCAST_OFF);
+}
+static inline void tick_broadcast_force(void)
+{
+	tick_broadcast_control(TICK_BROADCAST_FORCE);
+}
+static inline int tick_broadcast_enter(void)
 {
-	return __this_cpu_read(tick_cpu_sched.tick_stopped);
+	return tick_broadcast_oneshot_control(TICK_BROADCAST_ENTER);
+}
+static inline void tick_broadcast_exit(void)
+{
+	tick_broadcast_oneshot_control(TICK_BROADCAST_EXIT);
 }

+#ifdef CONFIG_NO_HZ_COMMON
+extern int tick_nohz_tick_stopped(void);
 extern void tick_nohz_idle_enter(void);
 extern void tick_nohz_idle_exit(void);
 extern void tick_nohz_irq_exit(void);
 extern ktime_t tick_nohz_get_sleep_length(void);
 extern u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time);
 extern u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time);
-
-# else /* !CONFIG_NO_HZ_COMMON */
-static inline int tick_nohz_tick_stopped(void)
-{
-	return 0;
-}
-
+#else /* !CONFIG_NO_HZ_COMMON */
+static inline int tick_nohz_tick_stopped(void) { return 0; }
 static inline void tick_nohz_idle_enter(void) { }
 static inline void tick_nohz_idle_exit(void) { }
@@ -163,7 +111,7 @@ static inline ktime_t tick_nohz_get_sleep_length(void)
 }
 static inline u64 get_cpu_idle_time_us(int cpu, u64 *unused) { return -1; }
 static inline u64 get_cpu_iowait_time_us(int cpu, u64 *unused) { return -1; }
-# endif /* !CONFIG_NO_HZ_COMMON */
+#endif /* !CONFIG_NO_HZ_COMMON */

 #ifdef CONFIG_NO_HZ_FULL
 extern bool tick_nohz_full_running;
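A hedged sketch of how an idle-path user consumes the new enum-based helpers in place of the removed clockevents_notify() reasons (compare the sched/idle hunk later in this diff; the function below is invented purely for illustration):

	static void demo_enter_deep_idle(void)
	{
		if (tick_broadcast_enter()) {
			/* no broadcast device available:
			 * fall back to a shallower C-state */
			return;
		}

		/* ... enter a C-state that stops the local timer ... */

		tick_broadcast_exit();
	}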
......
@@ -16,16 +16,16 @@
  * @read:		Read function of @clock
  * @mask:		Bitmask for two's complement subtraction of non 64bit clocks
  * @cycle_last:		@clock cycle value at last update
- * @mult:		NTP adjusted multiplier for scaled math conversion
+ * @mult:		(NTP adjusted) multiplier for scaled math conversion
  * @shift:		Shift value for scaled math conversion
  * @xtime_nsec:		Shifted (fractional) nano seconds offset for readout
- * @base_mono:		ktime_t (nanoseconds) base time for readout
+ * @base:		ktime_t (nanoseconds) base time for readout
  *
  * This struct has size 56 byte on 64 bit. Together with a seqcount it
  * occupies a single 64byte cache line.
  *
  * The struct is separate from struct timekeeper as it is also used
- * for a fast NMI safe accessor to clock monotonic.
+ * for a fast NMI safe accessors.
  */
 struct tk_read_base {
 	struct clocksource	*clock;
@@ -35,12 +35,13 @@ struct tk_read_base {
 	u32			mult;
 	u32			shift;
 	u64			xtime_nsec;
-	ktime_t			base_mono;
+	ktime_t			base;
 };

 /**
  * struct timekeeper - Structure holding internal timekeeping values.
- * @tkr:		The readout base structure
+ * @tkr_mono:		The readout base structure for CLOCK_MONOTONIC
+ * @tkr_raw:		The readout base structure for CLOCK_MONOTONIC_RAW
  * @xtime_sec:		Current CLOCK_REALTIME time in seconds
 * @ktime_sec:		Current CLOCK_MONOTONIC time in seconds
 * @wall_to_monotonic:	CLOCK_REALTIME to CLOCK_MONOTONIC offset
@@ -48,7 +49,6 @@ struct tk_read_base {
  * @offs_boot:		Offset clock monotonic -> clock boottime
  * @offs_tai:		Offset clock monotonic -> clock tai
  * @tai_offset:		The current UTC to TAI offset in seconds
- * @base_raw:		Monotonic raw base time in ktime_t format
  * @raw_time:		Monotonic raw base time in timespec64 format
  * @cycle_interval:	Number of clock cycles in one NTP interval
  * @xtime_interval:	Number of clock shifted nano seconds in one NTP
@@ -76,7 +76,8 @@ struct tk_read_base {
  *			used instead.
  */
 struct timekeeper {
-	struct tk_read_base	tkr;
+	struct tk_read_base	tkr_mono;
+	struct tk_read_base	tkr_raw;
 	u64			xtime_sec;
 	unsigned long		ktime_sec;
 	struct timespec64	wall_to_monotonic;
@@ -84,7 +85,6 @@ struct timekeeper {
 	ktime_t			offs_boot;
 	ktime_t			offs_tai;
 	s32			tai_offset;
-	ktime_t			base_raw;
 	struct timespec64	raw_time;

 	/* The following members are for timekeeping internal use */
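The tkr_mono/tkr_raw pairing exists so the fast accessors can read either clock with the same lockless loop. A simplified sketch of that read pattern follows; the real implementation uses a seqcount latch with two copies of each tk_read_base rather than this plain retry loop, so treat it only as an illustration of the idea:

	static u64 fast_ns_sketch(struct tk_read_base *tkr, unsigned int *seq)
	{
		unsigned int s;
		cycle_t delta;
		u64 ns;

		do {
			s = READ_ONCE(*seq);	/* snapshot the sequence */
			delta = (tkr->read(tkr->clock) - tkr->cycle_last) &
				tkr->mask;
			ns = ktime_to_ns(tkr->base) +
			     (((u64)delta * tkr->mult + tkr->xtime_nsec) >>
			      tkr->shift);
		} while (s != READ_ONCE(*seq));	/* retry if an update raced */

		return ns;
	}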
......
@@ -214,12 +214,18 @@ static inline u64 ktime_get_boot_ns(void)
 	return ktime_to_ns(ktime_get_boottime());
 }

+static inline u64 ktime_get_tai_ns(void)
+{
+	return ktime_to_ns(ktime_get_clocktai());
+}
+
 static inline u64 ktime_get_raw_ns(void)
 {
 	return ktime_to_ns(ktime_get_raw());
 }

 extern u64 ktime_get_mono_fast_ns(void);
+extern u64 ktime_get_raw_fast_ns(void);

 /*
  * Timespec interfaces utilizing the ktime based ones
@@ -242,6 +248,9 @@ static inline void timekeeping_clocktai(struct timespec *ts)
 /*
  * RTC specific
  */
+extern bool timekeeping_rtc_skipsuspend(void);
+extern bool timekeeping_rtc_skipresume(void);
+
 extern void timekeeping_inject_sleeptime64(struct timespec64 *delta);

 /*
@@ -253,17 +262,14 @@ extern void getnstime_raw_and_real(struct timespec *ts_raw,
 /*
  * Persistent clock related interfaces
  */
-extern bool persistent_clock_exist;
 extern int persistent_clock_is_local;

-static inline bool has_persistent_clock(void)
-{
-	return persistent_clock_exist;
-}
-
 extern void read_persistent_clock(struct timespec *ts);
+extern void read_persistent_clock64(struct timespec64 *ts);
 extern void read_boot_clock(struct timespec *ts);
+extern void read_boot_clock64(struct timespec64 *ts);
 extern int update_persistent_clock(struct timespec now);
+extern int update_persistent_clock64(struct timespec64 now);

 #endif
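Usage sketch for the new NMI-safe accessor pair; the handler below is an invented example of the kind of caller (such as a perf event handler) that may run from NMI context:

	static void demo_nmi_handler(void)
	{
		u64 mono_ns = ktime_get_mono_fast_ns();	/* monotonic clock */
		u64 raw_ns  = ktime_get_raw_fast_ns();	/* monotonic raw */

		/* Both are safe in NMI context; the trade-off is that a read
		 * racing a timekeeper update may be slightly stale. */
		(void)mono_ns;
		(void)raw_ns;
	}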
@@ -20,6 +20,7 @@
 #include <linux/gfp.h>
 #include <linux/suspend.h>
 #include <linux/lockdep.h>
+#include <linux/tick.h>
 #include <trace/events/power.h>

 #include "smpboot.h"
@@ -338,6 +339,8 @@ static int __ref take_cpu_down(void *_param)
 		return err;

 	cpu_notify(CPU_DYING | param->mod, param->hcpu);
+	/* Give up timekeeping duties */
+	tick_handover_do_timer();
 	/* Park the stopper thread */
 	kthread_park(current);
 	return 0;
@@ -411,10 +414,12 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
 	while (!idle_cpu(cpu))
 		cpu_relax();

+	hotplug_cpu__broadcast_tick_pull(cpu);
 	/* This actually kills the CPU. */
 	__cpu_die(cpu);

 	/* CPU is completely dead: tell everyone.  Too late to complain. */
+	tick_cleanup_dead_cpu(cpu);
 	cpu_notify_nofail(CPU_DEAD | mod, hcpu);

 	check_for_tasks(cpu);
......
@@ -158,8 +158,7 @@ static void cpuidle_idle_call(void)
 	 * is used from another cpu as a broadcast timer, this call may
 	 * fail if it is not available
 	 */
-	if (broadcast &&
-	    clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &dev->cpu))
+	if (broadcast && tick_broadcast_enter())
 		goto use_default;

 	/* Take note of the planned idle state. */
@@ -176,7 +175,7 @@ static void cpuidle_idle_call(void)
 	idle_set_state(this_rq(), NULL);

 	if (broadcast)
-		clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &dev->cpu);
+		tick_broadcast_exit();

 	/*
 	 * Give the governor an opportunity to reflect on the outcome
......
@@ -33,12 +33,6 @@ config ARCH_USES_GETTIMEOFFSET
 config GENERIC_CLOCKEVENTS
 	bool

-# Migration helper. Builds, but does not invoke
-config GENERIC_CLOCKEVENTS_BUILD
-	bool
-	default y
-	depends on GENERIC_CLOCKEVENTS
-
 # Architecture can handle broadcast in a driver-agnostic way
 config ARCH_HAS_TICK_BROADCAST
 	bool
......
@@ -2,15 +2,13 @@ obj-y += time.o timer.o hrtimer.o itimer.o posix-timers.o posix-cpu-timers.o
 obj-y += timekeeping.o ntp.o clocksource.o jiffies.o timer_list.o
 obj-y += timeconv.o timecounter.o posix-clock.o alarmtimer.o

-obj-$(CONFIG_GENERIC_CLOCKEVENTS_BUILD)		+= clockevents.o
-obj-$(CONFIG_GENERIC_CLOCKEVENTS)		+= tick-common.o
+obj-$(CONFIG_GENERIC_CLOCKEVENTS)		+= clockevents.o tick-common.o
 ifeq ($(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST),y)
  obj-y						+= tick-broadcast.o
  obj-$(CONFIG_TICK_ONESHOT)			+= tick-broadcast-hrtimer.o
 endif
 obj-$(CONFIG_GENERIC_SCHED_CLOCK)		+= sched_clock.o
-obj-$(CONFIG_TICK_ONESHOT)			+= tick-oneshot.o
-obj-$(CONFIG_TICK_ONESHOT)			+= tick-sched.o
+obj-$(CONFIG_TICK_ONESHOT)			+= tick-oneshot.o tick-sched.o
 obj-$(CONFIG_TIMER_STATS)			+= timer_stats.o
 obj-$(CONFIG_DEBUG_FS)				+= timekeeping_debug.o
 obj-$(CONFIG_TEST_UDELAY)			+= test_udelay.o
......
[One file's diff is collapsed and not shown here.]
@@ -142,13 +142,6 @@ static void __clocksource_unstable(struct clocksource *cs)
 	schedule_work(&watchdog_work);
 }

-static void clocksource_unstable(struct clocksource *cs, int64_t delta)
-{
-	printk(KERN_WARNING "Clocksource %s unstable (delta = %Ld ns)\n",
-	       cs->name, delta);
-	__clocksource_unstable(cs);
-}
-
 /**
  * clocksource_mark_unstable - mark clocksource unstable via watchdog
  * @cs:		clocksource to be marked unstable
@@ -174,7 +167,7 @@ void clocksource_mark_unstable(struct clocksource *cs)
 static void clocksource_watchdog(unsigned long data)
 {
 	struct clocksource *cs;
-	cycle_t csnow, wdnow, delta;
+	cycle_t csnow, wdnow, cslast, wdlast, delta;
 	int64_t wd_nsec, cs_nsec;
 	int next_cpu, reset_pending;
@@ -213,6 +206,8 @@ static void clocksource_watchdog(unsigned long data)

 		delta = clocksource_delta(csnow, cs->cs_last, cs->mask);
 		cs_nsec = clocksource_cyc2ns(delta, cs->mult, cs->shift);
+		wdlast = cs->wd_last; /* save these in case we print them */
+		cslast = cs->cs_last;
 		cs->cs_last = csnow;
 		cs->wd_last = wdnow;
@@ -221,7 +216,12 @@ static void clocksource_watchdog(unsigned long data)

 		/* Check the deviation from the watchdog clocksource. */
 		if ((abs(cs_nsec - wd_nsec) > WATCHDOG_THRESHOLD)) {
-			clocksource_unstable(cs, cs_nsec - wd_nsec);
+			pr_warn("timekeeping watchdog: Marking clocksource '%s' as unstable, because the skew is too large:\n", cs->name);
+			pr_warn("	'%s' wd_now: %llx wd_last: %llx mask: %llx\n",
+				watchdog->name, wdnow, wdlast, watchdog->mask);
+			pr_warn("	'%s' cs_now: %llx cs_last: %llx mask: %llx\n",
+				cs->name, csnow, cslast, cs->mask);
+			__clocksource_unstable(cs);
 			continue;
 		}
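To make the skew check concrete, a worked example with illustrative numbers:

	/*
	 * With mult = 2^24 and shift = 24, clocksource_cyc2ns() maps one
	 * cycle to one nanosecond:
	 *
	 *	cs_nsec = (delta * mult) >> shift = delta
	 *
	 * The clocksource is declared unstable when
	 * |cs_nsec - wd_nsec| > WATCHDOG_THRESHOLD, which in this era's
	 * source is NSEC_PER_SEC >> 4, i.e. 62.5 ms of drift against the
	 * watchdog between two checks.
	 */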
@@ -469,26 +469,25 @@ static u32 clocksource_max_adjustment(struct clocksource *cs)
  * @shift:	cycle to nanosecond divisor (power of two)
  * @maxadj:	maximum adjustment value to mult (~11%)
  * @mask:	bitmask for two's complement subtraction of non 64 bit counters
+ * @max_cyc:	maximum cycle value before potential overflow (does not include
+ *		any safety margin)
+ *
+ * NOTE: This function includes a safety margin of 50%, in other words, we
+ * return half the number of nanoseconds the hardware counter can technically
+ * cover. This is done so that we can potentially detect problems caused by
+ * delayed timers or bad hardware, which might result in time intervals that
+ * are larger then what the math used can handle without overflows.
  */
-u64 clocks_calc_max_nsecs(u32 mult, u32 shift, u32 maxadj, u64 mask)
+u64 clocks_calc_max_nsecs(u32 mult, u32 shift, u32 maxadj, u64 mask, u64 *max_cyc)
 {
 	u64 max_nsecs, max_cycles;

 	/*
 	 * Calculate the maximum number of cycles that we can pass to the
-	 * cyc2ns function without overflowing a 64-bit signed result. The
-	 * maximum number of cycles is equal to ULLONG_MAX/(mult+maxadj)
-	 * which is equivalent to the below.
-	 * max_cycles < (2^63)/(mult + maxadj)
-	 * max_cycles < 2^(log2((2^63)/(mult + maxadj)))
-	 * max_cycles < 2^(log2(2^63) - log2(mult + maxadj))
-	 * max_cycles < 2^(63 - log2(mult + maxadj))
-	 * max_cycles < 1 << (63 - log2(mult + maxadj))
-	 * Please note that we add 1 to the result of the log2 to account for
-	 * any rounding errors, ensure the above inequality is satisfied and
-	 * no overflow will occur.
+	 * cyc2ns() function without overflowing a 64-bit result.
 	 */
-	max_cycles = 1ULL << (63 - (ilog2(mult + maxadj) + 1));
+	max_cycles = ULLONG_MAX;
+	do_div(max_cycles, mult+maxadj);

 	/*
 	 * The actual maximum number of cycles we can defer the clocksource is
@@ -499,27 +498,26 @@ u64 clocks_calc_max_nsecs(u32 mult, u32 shift, u32 maxadj, u64 mask, u64 *max_cyc)
 	max_cycles = min(max_cycles, mask);
 	max_nsecs = clocksource_cyc2ns(max_cycles, mult - maxadj, shift);

+	/* return the max_cycles value as well if requested */
+	if (max_cyc)
+		*max_cyc = max_cycles;
+
+	/* Return 50% of the actual maximum, so we can detect bad values */
+	max_nsecs >>= 1;
+
 	return max_nsecs;
 }

 /**
- * clocksource_max_deferment - Returns max time the clocksource can be deferred
- * @cs:         Pointer to clocksource
- *
+ * clocksource_update_max_deferment - Updates the clocksource max_idle_ns & max_cycles
+ * @cs:         Pointer to clocksource to be updated
  */
-static u64 clocksource_max_deferment(struct clocksource *cs)
+static inline void clocksource_update_max_deferment(struct clocksource *cs)
 {
-	u64 max_nsecs;
-
-	max_nsecs = clocks_calc_max_nsecs(cs->mult, cs->shift, cs->maxadj,
-					  cs->mask);
-	/*
-	 * To ensure that the clocksource does not wrap whilst we are idle,
-	 * limit the time the clocksource can be deferred by 12.5%. Please
-	 * note a margin of 12.5% is used because this can be computed with
-	 * a shift, versus say 10% which would require division.
-	 */
-	return max_nsecs - (max_nsecs >> 3);
+	cs->max_idle_ns = clocks_calc_max_nsecs(cs->mult, cs->shift,
+						cs->maxadj, cs->mask,
+						&cs->max_cycles);
 }
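A worked example (illustrative values) of the new overflow math:

	/*
	 * Take a 32-bit counter with mult = 4194304 (2^22), shift = 22 and
	 * maxadj ~ 11% of mult (461373):
	 *
	 *	max_cycles = ULLONG_MAX / (mult + maxadj) ~ 3.96e12
	 *	max_cycles = min(max_cycles, mask)        = 0xffffffff
	 *	max_nsecs  = cyc2ns(0xffffffff, mult - maxadj, 22) ~ 3.8e9 ns
	 *	returned   = max_nsecs / 2                ~ 1.9 seconds
	 *
	 * so a timer delayed long enough to accumulate more than ~1.9 s of
	 * cycles is caught before (cycles * mult) can overflow 64 bits.
	 */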
#ifndef CONFIG_ARCH_USES_GETTIMEOFFSET #ifndef CONFIG_ARCH_USES_GETTIMEOFFSET
...@@ -648,7 +646,7 @@ static void clocksource_enqueue(struct clocksource *cs) ...@@ -648,7 +646,7 @@ static void clocksource_enqueue(struct clocksource *cs)
} }
 /**
- * __clocksource_updatefreq_scale - Used update clocksource with new freq
+ * __clocksource_update_freq_scale - Used to update the clocksource with a new freq
  * @cs:       clocksource to be registered
  * @scale:    Scale factor multiplied against freq to get clocksource hz
  * @freq:     clocksource frequency (cycles per second) divided by scale

@@ -656,48 +654,64 @@ static void clocksource_enqueue(struct clocksource *cs)
  * This should only be called from the clocksource->enable() method.
  *
  * This *SHOULD NOT* be called directly! Please use the
- * clocksource_updatefreq_hz() or clocksource_updatefreq_khz helper functions.
+ * __clocksource_update_freq_hz() or __clocksource_update_freq_khz() helper
+ * functions.
  */
-void __clocksource_updatefreq_scale(struct clocksource *cs, u32 scale, u32 freq)
+void __clocksource_update_freq_scale(struct clocksource *cs, u32 scale, u32 freq)
 {
    u64 sec;

    /*
-    * Calc the maximum number of seconds which we can run before
-    * wrapping around. For clocksources which have a mask > 32bit
-    * we need to limit the max sleep time to have a good
-    * conversion precision. 10 minutes is still a reasonable
-    * amount. That results in a shift value of 24 for a
-    * clocksource with mask >= 40bit and f >= 4GHz. That maps to
-    * ~ 0.06ppm granularity for NTP. We apply the same 12.5%
-    * margin as we do in clocksource_max_deferment()
+    * Default clocksources are *special* and self-define their mult/shift.
+    * But, you're not special, so you should specify a freq value.
     */
-   sec = (cs->mask - (cs->mask >> 3));
-   do_div(sec, freq);
-   do_div(sec, scale);
-   if (!sec)
-       sec = 1;
-   else if (sec > 600 && cs->mask > UINT_MAX)
-       sec = 600;
-
-   clocks_calc_mult_shift(&cs->mult, &cs->shift, freq,
-                          NSEC_PER_SEC / scale, sec * scale);
+   if (freq) {
+       /*
+        * Calc the maximum number of seconds which we can run before
+        * wrapping around. For clocksources which have a mask > 32-bit
+        * we need to limit the max sleep time to have a good
+        * conversion precision. 10 minutes is still a reasonable
+        * amount. That results in a shift value of 24 for a
+        * clocksource with mask >= 40-bit and f >= 4GHz. That maps to
+        * ~ 0.06ppm granularity for NTP.
+        */
+       sec = cs->mask;
+       do_div(sec, freq);
+       do_div(sec, scale);
+       if (!sec)
+           sec = 1;
+       else if (sec > 600 && cs->mask > UINT_MAX)
+           sec = 600;
+
+       clocks_calc_mult_shift(&cs->mult, &cs->shift, freq,
+                              NSEC_PER_SEC / scale, sec * scale);
+   }
    /*
-    * for clocksources that have large mults, to avoid overflow.
-    * Since mult may be adjusted by ntp, add a safety extra margin
-    *
+    * Ensure clocksources that have large 'mult' values don't overflow
+    * when adjusted.
     */
    cs->maxadj = clocksource_max_adjustment(cs);
-   while ((cs->mult + cs->maxadj < cs->mult)
-       || (cs->mult - cs->maxadj > cs->mult)) {
+   while (freq && ((cs->mult + cs->maxadj < cs->mult)
+       || (cs->mult - cs->maxadj > cs->mult))) {
        cs->mult >>= 1;
        cs->shift--;
        cs->maxadj = clocksource_max_adjustment(cs);
    }

-   cs->max_idle_ns = clocksource_max_deferment(cs);
+   /*
+    * Only warn for *special* clocksources that self-define
+    * their mult/shift values and don't specify a freq.
+    */
+   WARN_ONCE(cs->mult + cs->maxadj < cs->mult,
+       "timekeeping: Clocksource %s might overflow on 11%% adjustment\n",
+       cs->name);
+
+   clocksource_update_max_deferment(cs);
+
+   pr_info("clocksource %s: mask: 0x%llx max_cycles: 0x%llx, max_idle_ns: %lld ns\n",
+           cs->name, cs->mask, cs->max_cycles, cs->max_idle_ns);
 }
-EXPORT_SYMBOL_GPL(__clocksource_updatefreq_scale);
+EXPORT_SYMBOL_GPL(__clocksource_update_freq_scale);
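For context, a hedged sketch of how a clocksource driver would use the renamed helper from its enable() callback; the my_timer_* names are invented, and __clocksource_update_freq_hz() is the hz-flavoured wrapper (scale = 1) around the scale variant above:

/* Hypothetical driver: the timer's rate may change across suspend/resume,
 * so re-read it and let the core recompute mult/shift and max_idle_ns.
 */
static int my_timer_cs_enable(struct clocksource *cs)
{
    u32 rate_hz = my_timer_read_rate();   /* hypothetical helper */

    __clocksource_update_freq_hz(cs, rate_hz);
    return 0;
}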
 /**
  * __clocksource_register_scale - Used to install new clocksources

@@ -714,7 +728,7 @@ int __clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq)
 {

    /* Initialize mult/shift and max_idle_ns */
-   __clocksource_updatefreq_scale(cs, scale, freq);
+   __clocksource_update_freq_scale(cs, scale, freq);

    /* Add clocksource to the clocksource list */
    mutex_lock(&clocksource_mutex);

@@ -726,33 +740,6 @@ int __clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq)
 }
 EXPORT_SYMBOL_GPL(__clocksource_register_scale);
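A short usage sketch (the my_* names are invented): frequency-based clocksources register through the hz/khz wrappers around __clocksource_register_scale(), while self-defined mult/shift sources, such as the jiffies clocksource further down, use __clocksource_register(), i.e. freq = 0:

static struct clocksource my_cs = {
    .name   = "my_timer",            /* hypothetical device */
    .rating = 200,
    .read   = my_cs_read,            /* hypothetical read callback */
    .mask   = CLOCKSOURCE_MASK(32),
    .flags  = CLOCK_SOURCE_IS_CONTINUOUS,
};

static int __init my_timer_init(void)
{
    /* mult/shift are computed by the core from the 24 MHz rate */
    return clocksource_register_hz(&my_cs, 24000000);
}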
-/**
- * clocksource_register - Used to install new clocksources
- * @cs:        clocksource to be registered
- *
- * Returns -EBUSY if registration fails, zero otherwise.
- */
-int clocksource_register(struct clocksource *cs)
-{
-   /* calculate max adjustment for given mult/shift */
-   cs->maxadj = clocksource_max_adjustment(cs);
-   WARN_ONCE(cs->mult + cs->maxadj < cs->mult,
-       "Clocksource %s might overflow on 11%% adjustment\n",
-       cs->name);
-
-   /* calculate max idle time permitted for this clocksource */
-   cs->max_idle_ns = clocksource_max_deferment(cs);
-
-   mutex_lock(&clocksource_mutex);
-   clocksource_enqueue(cs);
-   clocksource_enqueue_watchdog(cs);
-   clocksource_select();
-   mutex_unlock(&clocksource_mutex);
-   return 0;
-}
-EXPORT_SYMBOL(clocksource_register);
 static void __clocksource_change_rating(struct clocksource *cs, int rating)
 {
    list_del(&cs->list);
......
@@ -54,7 +54,7 @@
 #include <trace/events/timer.h>

-#include "timekeeping.h"
+#include "tick-internal.h"

 /*
  * The timer bases:

@@ -1707,17 +1707,10 @@ static int hrtimer_cpu_notify(struct notifier_block *self,
        break;

 #ifdef CONFIG_HOTPLUG_CPU
-   case CPU_DYING:
-   case CPU_DYING_FROZEN:
-       clockevents_notify(CLOCK_EVT_NOTIFY_CPU_DYING, &scpu);
-       break;
    case CPU_DEAD:
    case CPU_DEAD_FROZEN:
-   {
-       clockevents_notify(CLOCK_EVT_NOTIFY_CPU_DEAD, &scpu);
        migrate_hrtimers(scpu);
        break;
-   }
 #endif

    default:
......
@@ -25,7 +25,7 @@
 #include <linux/module.h>
 #include <linux/init.h>

-#include "tick-internal.h"
+#include "timekeeping.h"

 /* The Jiffies based clocksource is the lowest common
  * denominator clock source which should function on

@@ -71,6 +71,7 @@ static struct clocksource clocksource_jiffies = {
    .mask       = 0xffffffff, /*32bits*/
    .mult       = NSEC_PER_JIFFY << JIFFIES_SHIFT, /* details above */
    .shift      = JIFFIES_SHIFT,
+   .max_cycles = 10,
 };

 __cacheline_aligned_in_smp DEFINE_SEQLOCK(jiffies_lock);

@@ -94,7 +95,7 @@ EXPORT_SYMBOL(jiffies);

 static int __init init_jiffies_clocksource(void)
 {
-   return clocksource_register(&clocksource_jiffies);
+   return __clocksource_register(&clocksource_jiffies);
 }

 core_initcall(init_jiffies_clocksource);

@@ -130,6 +131,6 @@ int register_refined_jiffies(long cycles_per_second)
    refined_jiffies.mult = ((u32)nsec_per_tick) << JIFFIES_SHIFT;

-   clocksource_register(&refined_jiffies);
+   __clocksource_register(&refined_jiffies);
    return 0;
 }
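A quick check of the self-defined mult/shift above: one jiffy of "cycles" converts back to exactly NSEC_PER_JIFFY, since

    cyc2ns(1) = (1 * (NSEC_PER_JIFFY << JIFFIES_SHIFT)) >> JIFFIES_SHIFT
              = NSEC_PER_JIFFY

which is why this clocksource registers via __clocksource_register() (freq = 0) and skips the mult/shift computation entirely.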
@@ -17,7 +17,6 @@
 #include <linux/module.h>
 #include <linux/rtc.h>

-#include "tick-internal.h"
 #include "ntp_internal.h"

 /*

@@ -459,6 +458,16 @@ int second_overflow(unsigned long secs)
    return leap;
 }

+#ifdef CONFIG_GENERIC_CMOS_UPDATE
+int __weak update_persistent_clock64(struct timespec64 now64)
+{
+   struct timespec now;
+
+   now = timespec64_to_timespec(now64);
+   return update_persistent_clock(now);
+}
+#endif
+
 #if defined(CONFIG_GENERIC_CMOS_UPDATE) || defined(CONFIG_RTC_SYSTOHC)
 static void sync_cmos_clock(struct work_struct *work);

@@ -494,8 +503,9 @@ static void sync_cmos_clock(struct work_struct *work)
        if (persistent_clock_is_local)
            adjust.tv_sec -= (sys_tz.tz_minuteswest * 60);
 #ifdef CONFIG_GENERIC_CMOS_UPDATE
-       fail = update_persistent_clock(timespec64_to_timespec(adjust));
+       fail = update_persistent_clock64(adjust);
 #endif
 #ifdef CONFIG_RTC_SYSTOHC
        if (fail == -ENODEV)
            fail = rtc_set_ntp_time(adjust);
......
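The __weak default above still round-trips through the 32-bit struct timespec, so an architecture that wants a y2038-clean path can provide its own time64_t-native override. A hedged sketch (mach_set_clock() is a hypothetical machine hook, not a real kernel symbol):

/* Hypothetical arch override: consume the 64-bit time directly instead
 * of converting down to the legacy 32-bit struct timespec.
 */
int update_persistent_clock64(struct timespec64 now)
{
    return mach_set_clock(now.tv_sec, now.tv_nsec);
}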
This diff is collapsed.
This diff is collapsed.
@@ -102,7 +102,7 @@ void tick_handle_periodic(struct clock_event_device *dev)

    tick_periodic(cpu);

-   if (dev->mode != CLOCK_EVT_MODE_ONESHOT)
+   if (dev->state != CLOCK_EVT_STATE_ONESHOT)
        return;
    for (;;) {
        /*

@@ -140,7 +140,7 @@ void tick_setup_periodic(struct clock_event_device *dev, int broadcast)

    if ((dev->features & CLOCK_EVT_FEAT_PERIODIC) &&
        !tick_broadcast_oneshot_active()) {
-       clockevents_set_mode(dev, CLOCK_EVT_MODE_PERIODIC);
+       clockevents_set_state(dev, CLOCK_EVT_STATE_PERIODIC);
    } else {
        unsigned long seq;
        ktime_t next;

@@ -150,7 +150,7 @@ void tick_setup_periodic(struct clock_event_device *dev, int broadcast)
            next = tick_next_period;
        } while (read_seqretry(&jiffies_lock, seq));

-       clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT);
+       clockevents_set_state(dev, CLOCK_EVT_STATE_ONESHOT);

        for (;;) {
            if (!clockevents_program_event(dev, next, false))

@@ -332,14 +332,16 @@ void tick_check_new_device(struct clock_event_device *newdev)
        tick_install_broadcast_device(newdev);
 }

+#ifdef CONFIG_HOTPLUG_CPU
 /*
  * Transfer the do_timer job away from a dying cpu.
  *
- * Called with interrupts disabled.
+ * Called with interrupts disabled. No locking required. If
+ * tick_do_timer_cpu is owned by this cpu, nothing can change it.
  */
-void tick_handover_do_timer(int *cpup)
+void tick_handover_do_timer(void)
 {
-   if (*cpup == tick_do_timer_cpu) {
+   if (tick_do_timer_cpu == smp_processor_id()) {
        int cpu = cpumask_first(cpu_online_mask);

        tick_do_timer_cpu = (cpu < nr_cpu_ids) ? cpu :

@@ -354,9 +356,9 @@ void tick_handover_do_timer(int *cpup)
  * access the hardware device itself.
  * We just set the mode and remove it from the lists.
  */
-void tick_shutdown(unsigned int *cpup)
+void tick_shutdown(unsigned int cpu)
 {
-   struct tick_device *td = &per_cpu(tick_cpu_device, *cpup);
+   struct tick_device *td = &per_cpu(tick_cpu_device, cpu);
    struct clock_event_device *dev = td->evtdev;

    td->mode = TICKDEV_MODE_PERIODIC;

@@ -365,27 +367,42 @@ void tick_shutdown(unsigned int *cpup)
         * Prevent that the clock events layer tries to call
         * the set mode function!
         */
+       dev->state = CLOCK_EVT_STATE_DETACHED;
        dev->mode = CLOCK_EVT_MODE_UNUSED;
        clockevents_exchange_device(dev, NULL);
        dev->event_handler = clockevents_handle_noop;
        td->evtdev = NULL;
    }
 }
+#endif
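With the CPU_DYING notifier path gone (see the hrtimer hunk above), hotplug code is expected to invoke these helpers explicitly. A hedged sketch of the call shape implied by the new signatures; hypothetical_cpu_down_path() is invented, not the real kernel/cpu.c flow:

/* Illustrative only: the explicit hotplug sequence the new API implies. */
static void hypothetical_cpu_down_path(unsigned int cpu)
{
    /* On the dying CPU, with interrupts disabled: */
    tick_handover_do_timer();   /* hand do_timer duty to an online CPU */

    /* Later, once the CPU is dead, from a surviving CPU: */
    tick_shutdown(cpu);         /* detach and park its clock event device */
}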
-void tick_suspend(void)
+/**
+ * tick_suspend_local - Suspend the local tick device
+ *
+ * Called from the local cpu for freeze with interrupts disabled.
+ *
+ * No locks required. Nothing can change the per cpu device.
+ */
+void tick_suspend_local(void)
 {
    struct tick_device *td = this_cpu_ptr(&tick_cpu_device);

    clockevents_shutdown(td->evtdev);
 }

-void tick_resume(void)
+/**
+ * tick_resume_local - Resume the local tick device
+ *
+ * Called from the local CPU for unfreeze or XEN resume magic.
+ *
+ * No locks required. Nothing can change the per cpu device.
+ */
+void tick_resume_local(void)
 {
    struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
-   int broadcast = tick_resume_broadcast();
+   bool broadcast = tick_resume_check_broadcast();

-   clockevents_set_mode(td->evtdev, CLOCK_EVT_MODE_RESUME);
+   clockevents_tick_resume(td->evtdev);
    if (!broadcast) {
        if (td->mode == TICKDEV_MODE_PERIODIC)
            tick_setup_periodic(td->evtdev, 0);

@@ -394,6 +411,35 @@ void tick_resume(void)
    }
 }

+/**
+ * tick_suspend - Suspend the tick and the broadcast device
+ *
+ * Called from syscore_suspend() via timekeeping_suspend with only one
+ * CPU online and interrupts disabled or from tick_unfreeze() under
+ * tick_freeze_lock.
+ *
+ * No locks required. Nothing can change the per cpu device.
+ */
+void tick_suspend(void)
+{
+   tick_suspend_local();
+   tick_suspend_broadcast();
+}
+
+/**
+ * tick_resume - Resume the tick and the broadcast device
+ *
+ * Called from syscore_resume() via timekeeping_resume with only one
+ * CPU online and interrupts disabled.
+ *
+ * No locks required. Nothing can change the per cpu device.
+ */
+void tick_resume(void)
+{
+   tick_resume_broadcast();
+   tick_resume_local();
+}
+
 static DEFINE_RAW_SPINLOCK(tick_freeze_lock);
 static unsigned int tick_freeze_depth;

@@ -411,12 +457,10 @@ void tick_freeze(void)
    raw_spin_lock(&tick_freeze_lock);

    tick_freeze_depth++;
-   if (tick_freeze_depth == num_online_cpus()) {
+   if (tick_freeze_depth == num_online_cpus())
        timekeeping_suspend();
-   } else {
-       tick_suspend();
-       tick_suspend_broadcast();
-   }
+   else
+       tick_suspend_local();

    raw_spin_unlock(&tick_freeze_lock);
 }

@@ -437,7 +481,7 @@ void tick_unfreeze(void)
    if (tick_freeze_depth == num_online_cpus())
        timekeeping_resume();
    else
-       tick_resume();
+       tick_resume_local();

    tick_freeze_depth--;
......
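The depth counter gives tick_freeze()/tick_unfreeze() a simple "last CPU in, first CPU out" protocol. A hedged, userspace-style model of the bookkeeping (the *_timekeeping/*_local_tick names are stand-ins, not kernel functions):

static unsigned int depth;   /* models tick_freeze_depth */

void model_freeze(unsigned int num_online)
{
    if (++depth == num_online)
        suspend_timekeeping();  /* last CPU in suspends the whole system */
    else
        suspend_local_tick();
}

void model_unfreeze(unsigned int num_online)
{
    if (depth == num_online)
        resume_timekeeping();   /* first CPU out resumes timekeeping */
    else
        resume_local_tick();
    depth--;
}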
This diff is collapsed.
@@ -38,7 +38,7 @@ void tick_resume_oneshot(void)
 {
    struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev);

-   clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT);
+   clockevents_set_state(dev, CLOCK_EVT_STATE_ONESHOT);
    clockevents_program_event(dev, ktime_get(), true);
 }

@@ -50,7 +50,7 @@ void tick_setup_oneshot(struct clock_event_device *newdev,
            ktime_t next_event)
 {
    newdev->event_handler = handler;
-   clockevents_set_mode(newdev, CLOCK_EVT_MODE_ONESHOT);
+   clockevents_set_state(newdev, CLOCK_EVT_STATE_ONESHOT);
    clockevents_program_event(newdev, next_event, true);
 }

@@ -81,7 +81,7 @@ int tick_switch_to_oneshot(void (*handler)(struct clock_event_device *))
    td->mode = TICKDEV_MODE_ONESHOT;
    dev->event_handler = handler;
-   clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT);
+   clockevents_set_state(dev, CLOCK_EVT_STATE_ONESHOT);
    tick_broadcast_switch_to_oneshot();
    return 0;
 }
......
@@ -34,7 +34,7 @@
 /*
  * Per cpu nohz control structure
  */
-DEFINE_PER_CPU(struct tick_sched, tick_cpu_sched);
+static DEFINE_PER_CPU(struct tick_sched, tick_cpu_sched);

 /*
  * The time, when the last jiffy update happened. Protected by jiffies_lock.

@@ -416,6 +416,11 @@ static int __init setup_tick_nohz(char *str)

 __setup("nohz=", setup_tick_nohz);

+int tick_nohz_tick_stopped(void)
+{
+   return __this_cpu_read(tick_cpu_sched.tick_stopped);
+}
+
 /**
  * tick_nohz_update_jiffies - update jiffies when idle was interrupted
  *
......
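Making tick_cpu_sched static works because the new accessor above is now the only way for code outside this file to peek at the flag. A hedged usage sketch (the caller shown is invented):

/* e.g. in code that must not rely on the next local tick arriving: */
if (tick_nohz_tick_stopped()) {
    /* the periodic tick is off on this CPU; arrange another wakeup */
    kick_some_other_mechanism();   /* hypothetical */
}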
#ifndef _TICK_SCHED_H
#define _TICK_SCHED_H
#include <linux/hrtimer.h>
enum tick_device_mode {
TICKDEV_MODE_PERIODIC,
TICKDEV_MODE_ONESHOT,
};
struct tick_device {
struct clock_event_device *evtdev;
enum tick_device_mode mode;
};
enum tick_nohz_mode {
NOHZ_MODE_INACTIVE,
NOHZ_MODE_LOWRES,
NOHZ_MODE_HIGHRES,
};
/**
* struct tick_sched - sched tick emulation and no idle tick control/stats
* @sched_timer: hrtimer to schedule the periodic tick in high
* resolution mode
* @last_tick: Store the last tick expiry time when the tick
* timer is modified for nohz sleeps. This is necessary
* to resume the tick timer operation in the timeline
* when the CPU returns from nohz sleep.
* @tick_stopped: Indicator that the idle tick has been stopped
* @idle_jiffies: jiffies at the entry to idle for idle time accounting
* @idle_calls: Total number of idle calls
* @idle_sleeps: Number of idle calls, where the sched tick was stopped
* @idle_entrytime: Time when the idle call was entered
* @idle_waketime: Time when the idle was interrupted
* @idle_exittime: Time when the idle state was left
* @idle_sleeptime: Sum of the time slept in idle with sched tick stopped
* @iowait_sleeptime: Sum of the time slept in idle with sched tick stopped, with IO outstanding
* @sleep_length: Duration of the current idle sleep
 * @do_timer_last:	CPU was the last one doing do_timer before going idle
*/
struct tick_sched {
struct hrtimer sched_timer;
unsigned long check_clocks;
enum tick_nohz_mode nohz_mode;
ktime_t last_tick;
int inidle;
int tick_stopped;
unsigned long idle_jiffies;
unsigned long idle_calls;
unsigned long idle_sleeps;
int idle_active;
ktime_t idle_entrytime;
ktime_t idle_waketime;
ktime_t idle_exittime;
ktime_t idle_sleeptime;
ktime_t iowait_sleeptime;
ktime_t sleep_length;
unsigned long last_jiffies;
unsigned long next_jiffies;
ktime_t idle_expires;
int do_timer_last;
};
extern struct tick_sched *tick_get_tick_sched(int cpu);
extern void tick_setup_sched_timer(void);
#if defined CONFIG_NO_HZ_COMMON || defined CONFIG_HIGH_RES_TIMERS
extern void tick_cancel_sched_timer(int cpu);
#else
static inline void tick_cancel_sched_timer(int cpu) { }
#endif
#endif
This diff is collapsed.
@@ -19,4 +19,11 @@ extern void timekeeping_clocktai(struct timespec *ts);
 extern int timekeeping_suspend(void);
 extern void timekeeping_resume(void);

+extern void do_timer(unsigned long ticks);
+extern void update_wall_time(void);
+
+extern seqlock_t jiffies_lock;
+
+#define CS_NAME_LEN	32
+
 #endif
@@ -90,8 +90,18 @@ struct tvec_base {
    struct tvec tv5;
 } ____cacheline_aligned;

+/*
+ * __TIMER_INITIALIZER() needs to set ->base to a valid pointer (because we've
+ * made NULL special, hint: lock_timer_base()) and we cannot get a compile time
+ * pointer to per-cpu entries because we don't know where we'll map the section,
+ * even for the boot cpu.
+ *
+ * And so we use boot_tvec_bases for boot CPU and per-cpu __tvec_bases for the
+ * rest of them.
+ */
 struct tvec_base boot_tvec_bases;
 EXPORT_SYMBOL(boot_tvec_bases);
+
 static DEFINE_PER_CPU(struct tvec_base *, tvec_bases) = &boot_tvec_bases;

 /* Functions below help us manage 'deferrable' flag */
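The 'deferrable' flag machinery referenced here packs flag bits into the low bits of each timer's base pointer, which is why init_timer_cpu() below can BUG_ON(base != tbase_get_base(base)) for a properly aligned base, and why init_timers() asserts the alignment at build time. A hedged sketch of the pointer-tagging idea (names simplified; TIMER_FLAG_MASK is the real mask used in the BUILD_BUG_ON further down):

/* Illustrative only: tagging a well-aligned pointer with flag bits. */
static inline struct tvec_base *base_of(unsigned long tagged)
{
    return (struct tvec_base *)(tagged & ~TIMER_FLAG_MASK);
}

static inline unsigned long flags_of(unsigned long tagged)
{
    return tagged & TIMER_FLAG_MASK;
}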
@@ -1027,6 +1037,8 @@ int try_to_del_timer_sync(struct timer_list *timer)
 EXPORT_SYMBOL(try_to_del_timer_sync);

 #ifdef CONFIG_SMP
+static DEFINE_PER_CPU(struct tvec_base, __tvec_bases);
+
 /**
  * del_timer_sync - deactivate a timer and wait for the handler to finish.
  * @timer: the timer to be deactivated

@@ -1532,64 +1544,6 @@ signed long __sched schedule_timeout_uninterruptible(signed long timeout)
 }
 EXPORT_SYMBOL(schedule_timeout_uninterruptible);
-static int init_timers_cpu(int cpu)
-{
-   int j;
-   struct tvec_base *base;
-   static char tvec_base_done[NR_CPUS];
-
-   if (!tvec_base_done[cpu]) {
-       static char boot_done;
-
-       if (boot_done) {
-           /*
-            * The APs use this path later in boot
-            */
-           base = kzalloc_node(sizeof(*base), GFP_KERNEL,
-                               cpu_to_node(cpu));
-           if (!base)
-               return -ENOMEM;
-
-           /* Make sure tvec_base has TIMER_FLAG_MASK bits free */
-           if (WARN_ON(base != tbase_get_base(base))) {
-               kfree(base);
-               return -ENOMEM;
-           }
-           per_cpu(tvec_bases, cpu) = base;
-       } else {
-           /*
-            * This is for the boot CPU - we use compile-time
-            * static initialisation because per-cpu memory isn't
-            * ready yet and because the memory allocators are not
-            * initialised either.
-            */
-           boot_done = 1;
-           base = &boot_tvec_bases;
-       }
-       spin_lock_init(&base->lock);
-       tvec_base_done[cpu] = 1;
-       base->cpu = cpu;
-   } else {
-       base = per_cpu(tvec_bases, cpu);
-   }
-
-   for (j = 0; j < TVN_SIZE; j++) {
-       INIT_LIST_HEAD(base->tv5.vec + j);
-       INIT_LIST_HEAD(base->tv4.vec + j);
-       INIT_LIST_HEAD(base->tv3.vec + j);
-       INIT_LIST_HEAD(base->tv2.vec + j);
-   }
-   for (j = 0; j < TVR_SIZE; j++)
-       INIT_LIST_HEAD(base->tv1.vec + j);
-
-   base->timer_jiffies = jiffies;
-   base->next_timer = base->timer_jiffies;
-   base->active_timers = 0;
-   base->all_timers = 0;
-   return 0;
-}
 #ifdef CONFIG_HOTPLUG_CPU
 static void migrate_timer_list(struct tvec_base *new_base, struct list_head *head)
 {

@@ -1631,55 +1585,86 @@ static void migrate_timers(int cpu)
            migrate_timer_list(new_base, old_base->tv5.vec + i);
    }

+   old_base->active_timers = 0;
+   old_base->all_timers = 0;
+
    spin_unlock(&old_base->lock);
    spin_unlock_irq(&new_base->lock);
    put_cpu_var(tvec_bases);
 }
-#endif /* CONFIG_HOTPLUG_CPU */
 static int timer_cpu_notify(struct notifier_block *self,
            unsigned long action, void *hcpu)
 {
-   long cpu = (long)hcpu;
-   int err;
-
-   switch(action) {
-   case CPU_UP_PREPARE:
-   case CPU_UP_PREPARE_FROZEN:
-       err = init_timers_cpu(cpu);
-       if (err < 0)
-           return notifier_from_errno(err);
-       break;
-#ifdef CONFIG_HOTPLUG_CPU
+   switch (action) {
    case CPU_DEAD:
    case CPU_DEAD_FROZEN:
-       migrate_timers(cpu);
+       migrate_timers((long)hcpu);
        break;
-#endif
    default:
        break;
    }

    return NOTIFY_OK;
 }

-static struct notifier_block timers_nb = {
-   .notifier_call  = timer_cpu_notify,
-};
+static inline void timer_register_cpu_notifier(void)
+{
+   cpu_notifier(timer_cpu_notify, 0);
+}
+#else
+static inline void timer_register_cpu_notifier(void) { }
+#endif /* CONFIG_HOTPLUG_CPU */

+static void __init init_timer_cpu(struct tvec_base *base, int cpu)
+{
+   int j;
+
+   BUG_ON(base != tbase_get_base(base));
+
+   base->cpu = cpu;
+   per_cpu(tvec_bases, cpu) = base;
+   spin_lock_init(&base->lock);
+
+   for (j = 0; j < TVN_SIZE; j++) {
+       INIT_LIST_HEAD(base->tv5.vec + j);
+       INIT_LIST_HEAD(base->tv4.vec + j);
+       INIT_LIST_HEAD(base->tv3.vec + j);
+       INIT_LIST_HEAD(base->tv2.vec + j);
+   }
+   for (j = 0; j < TVR_SIZE; j++)
+       INIT_LIST_HEAD(base->tv1.vec + j);
+
+   base->timer_jiffies = jiffies;
+   base->next_timer = base->timer_jiffies;
+}
+
+static void __init init_timer_cpus(void)
+{
+   struct tvec_base *base;
+   int local_cpu = smp_processor_id();
+   int cpu;
+
+   for_each_possible_cpu(cpu) {
+       if (cpu == local_cpu)
+           base = &boot_tvec_bases;
+#ifdef CONFIG_SMP
+       else
+           base = per_cpu_ptr(&__tvec_bases, cpu);
+#endif
+
+       init_timer_cpu(base, cpu);
+   }
+}
+
 void __init init_timers(void)
 {
-   int err;
-
    /* ensure there are enough low bits for flags in timer->base pointer */
    BUILD_BUG_ON(__alignof__(struct tvec_base) & TIMER_FLAG_MASK);

-   err = timer_cpu_notify(&timers_nb, (unsigned long)CPU_UP_PREPARE,
-                          (void *)(long)smp_processor_id());
-   BUG_ON(err != NOTIFY_OK);
-
+   init_timer_cpus();
    init_timer_stats();
-   register_cpu_notifier(&timers_nb);
+   timer_register_cpu_notifier();
    open_softirq(TIMER_SOFTIRQ, run_timer_softirq);
 }
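For orientation, these per-cpu bases back the ordinary timer API, and a statically defined timer carries a compile-time base pointer, which is exactly what boot_tvec_bases exists to provide. A hedged sketch using the era-appropriate 4-argument DEFINE_TIMER (callback and usage are invented):

static void my_timeout(unsigned long data)   /* hypothetical callback */
{
    pr_info("timer fired\n");
}

/* DEFINE_TIMER expands to __TIMER_INITIALIZER(), whose ->base must be a
 * valid compile-time pointer; boot_tvec_bases supplies it. */
static DEFINE_TIMER(my_timer, my_timeout, 0, 0);

/* later, e.g. from an initcall: mod_timer(&my_timer, jiffies + HZ); */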
......
@@ -16,10 +16,10 @@
 #include <linux/sched.h>
 #include <linux/seq_file.h>
 #include <linux/kallsyms.h>
+#include <linux/tick.h>

 #include <asm/uaccess.h>

-#include "tick-internal.h"

 struct timer_list_iter {
    int cpu;

@@ -228,9 +228,35 @@ print_tickdevice(struct seq_file *m, struct tick_device *td, int cpu)
    print_name_offset(m, dev->set_next_event);
    SEQ_printf(m, "\n");

-   SEQ_printf(m, " set_mode:       ");
-   print_name_offset(m, dev->set_mode);
-   SEQ_printf(m, "\n");
+   if (dev->set_mode) {
+       SEQ_printf(m, " set_mode:       ");
+       print_name_offset(m, dev->set_mode);
+       SEQ_printf(m, "\n");
+   } else {
+       if (dev->set_state_shutdown) {
+           SEQ_printf(m, " shutdown: ");
+           print_name_offset(m, dev->set_state_shutdown);
+           SEQ_printf(m, "\n");
+       }
+
+       if (dev->set_state_periodic) {
+           SEQ_printf(m, " periodic: ");
+           print_name_offset(m, dev->set_state_periodic);
+           SEQ_printf(m, "\n");
+       }
+
+       if (dev->set_state_oneshot) {
+           SEQ_printf(m, " oneshot:  ");
+           print_name_offset(m, dev->set_state_oneshot);
+           SEQ_printf(m, "\n");
+       }
+
+       if (dev->tick_resume) {
+           SEQ_printf(m, " resume:   ");
+           print_name_offset(m, dev->tick_resume);
+           SEQ_printf(m, "\n");
+       }
+   }

    SEQ_printf(m, " event_handler:  ");
    print_name_offset(m, dev->event_handler);
......
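The practical effect shows up in /proc/timer_list: a device converted to the per-state callbacks reports those instead of a single set_mode line. Illustrative output only (device and symbol names invented):

Clock Event Device: my_evt
 ...
 shutdown: my_evt_shutdown
 periodic: my_evt_set_periodic
 oneshot:  my_evt_set_oneshot
 event_handler:  hrtimer_interrupt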
@@ -865,6 +865,19 @@ config SCHED_STACK_END_CHECK
      data corruption or a sporadic crash at a later stage once the region
      is examined. The runtime overhead introduced is minimal.

+config DEBUG_TIMEKEEPING
+	bool "Enable extra timekeeping sanity checking"
+	help
+	  This option will enable additional timekeeping sanity checks
+	  which may be helpful when diagnosing issues where timekeeping
+	  problems are suspected.
+
+	  This may include checks in the timekeeping hotpaths, so this
+	  option may have a (very small) performance impact on some
+	  workloads.
+
+	  If unsure, say N.
+
 config TIMER_STATS
	bool "Collect kernel timers statistics"
	depends on DEBUG_KERNEL && PROC_FS
......
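To exercise the new checks, one would enable the option in a debug build; a minimal .config fragment (illustrative):

CONFIG_DEBUG_KERNEL=y
CONFIG_DEBUG_TIMEKEEPING=y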