Commit dc799d01 authored by Linus Torvalds

Merge branch 'timers-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull timer fixes from Thomas Gleixner:
 "The timer departement delivers:

   - a regression fix for the NTP code along with a proper selftest
   - prevent a spurious timer interrupt in the NOHZ lowres code
   - a fix for user space interfaces returning the remaining time on
     architectures with CONFIG_TIME_LOW_RES=y
   - a few patches to fix COMPILE_TEST fallout"

* 'timers-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  tick/nohz: Set the correct expiry when switching to nohz/lowres mode
  clocksource: Fix dependencies for archs w/o HAS_IOMEM
  clocksource: Select CLKSRC_MMIO where needed
  tick/sched: Hide unused oneshot timer code
  kselftests: timers: Add adjtimex SETOFFSET validity tests
  ntp: Fix ADJ_SETOFFSET being used w/ ADJ_NANO
  itimers: Handle relative timers with CONFIG_TIME_LOW_RES proper
  posix-timers: Handle relative timers with CONFIG_TIME_LOW_RES proper
  timerfd: Handle relative timers with CONFIG_TIME_LOW_RES proper
  hrtimer: Handle remaining time proper for TIME_LOW_RES
  clockevents/tcb_clksrc: Prevent disabling an already disabled clock
parents 7ab85d4a 1ca8ec53
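
For orientation before the diffs: the CONFIG_TIME_LOW_RES patches are about what user space sees when it reads back the remaining time of a relative timer. On affected architectures the kernel pads relative timers by one jiffy when arming them, and before this series that pad leaked back out through timerfd, itimers and posix-timers. A minimal user-space sketch of the visible check (the 5 ms request and the use of timerfd are illustrative choices, not material from this series):

#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <sys/timerfd.h>

int main(void)
{
	/* Ask for a 5 ms one-shot relative timer ... */
	struct itimerspec req = { .it_value = { .tv_sec = 0, .tv_nsec = 5 * 1000 * 1000 } };
	struct itimerspec cur;
	int fd = timerfd_create(CLOCK_MONOTONIC, 0);

	if (fd < 0 || timerfd_settime(fd, 0, &req, NULL) < 0) {
		perror("timerfd");
		return EXIT_FAILURE;
	}

	/* ... and read the remaining time straight back. With the fix applied,
	 * the reported value can no longer exceed the 5 ms that was requested. */
	timerfd_gettime(fd, &cur);
	printf("remaining: %ld ns\n", cur.it_value.tv_nsec);
	return 0;
}
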
......@@ -30,6 +30,8 @@ config CLKSRC_MMIO
config DIGICOLOR_TIMER
bool "Digicolor timer driver" if COMPILE_TEST
depends on GENERIC_CLOCKEVENTS
select CLKSRC_MMIO
depends on HAS_IOMEM
help
Enables the support for the digicolor timer driver.
......@@ -55,6 +57,7 @@ config ARMADA_370_XP_TIMER
bool "Armada 370 and XP timer driver" if COMPILE_TEST
depends on ARM
select CLKSRC_OF
select CLKSRC_MMIO
help
Enables the support for the Armada 370 and XP timer driver.
......@@ -76,6 +79,7 @@ config ORION_TIMER
config SUN4I_TIMER
bool "Sun4i timer driver" if COMPILE_TEST
depends on GENERIC_CLOCKEVENTS
depends on HAS_IOMEM
select CLKSRC_MMIO
help
Enables support for the Sun4i timer.
......@@ -89,6 +93,7 @@ config SUN5I_HSTIMER
config TEGRA_TIMER
bool "Tegra timer driver" if COMPILE_TEST
select CLKSRC_MMIO
depends on ARM
help
Enables support for the Tegra driver.
......@@ -96,6 +101,7 @@ config TEGRA_TIMER
config VT8500_TIMER
bool "VT8500 timer driver" if COMPILE_TEST
depends on GENERIC_CLOCKEVENTS
depends on HAS_IOMEM
help
Enables support for the VT8500 driver.
......@@ -131,6 +137,7 @@ config CLKSRC_NOMADIK_MTU_SCHED_CLOCK
config CLKSRC_DBX500_PRCMU
bool "Clocksource PRCMU Timer" if COMPILE_TEST
depends on GENERIC_CLOCKEVENTS
depends on HAS_IOMEM
help
Use the always on PRCMU Timer as clocksource
......@@ -248,6 +255,7 @@ config CLKSRC_EXYNOS_MCT
config CLKSRC_SAMSUNG_PWM
bool "PWM timer drvier for Samsung S3C, S5P" if COMPILE_TEST
depends on GENERIC_CLOCKEVENTS
depends on HAS_IOMEM
help
This is a new clocksource driver for the PWM timer found in
Samsung S3C, S5P and Exynos SoCs, replacing an earlier driver
......@@ -257,12 +265,14 @@ config CLKSRC_SAMSUNG_PWM
config FSL_FTM_TIMER
bool "Freescale FlexTimer Module driver" if COMPILE_TEST
depends on GENERIC_CLOCKEVENTS
depends on HAS_IOMEM
select CLKSRC_MMIO
help
Support for Freescale FlexTimer Module (FTM) timer.
config VF_PIT_TIMER
bool
select CLKSRC_MMIO
help
Support for Period Interrupt Timer on Freescale Vybrid Family SoCs.
......@@ -360,6 +370,7 @@ config CLKSRC_TANGO_XTAL
config CLKSRC_PXA
bool "Clocksource for PXA or SA-11x0 platform" if COMPILE_TEST
depends on GENERIC_CLOCKEVENTS
depends on HAS_IOMEM
select CLKSRC_MMIO
help
This enables OST0 support available on PXA and SA-11x0
......@@ -394,6 +405,7 @@ config CLKSRC_ST_LPC
bool "Low power clocksource found in the LPC" if COMPILE_TEST
select CLKSRC_OF if OF
depends on HAS_IOMEM
select CLKSRC_MMIO
help
Enable this option to use the Low Power controller timer
as clocksource.
......
......@@ -98,7 +98,8 @@ static int tc_shutdown(struct clock_event_device *d)
__raw_writel(0xff, regs + ATMEL_TC_REG(2, IDR));
__raw_writel(ATMEL_TC_CLKDIS, regs + ATMEL_TC_REG(2, CCR));
clk_disable(tcd->clk);
if (!clockevent_state_detached(d))
clk_disable(tcd->clk);
return 0;
}
......
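
A note on the tcb_clksrc hunk above: the common clock framework keeps an enable count, so clk_disable() must pair one-to-one with a prior clk_enable(), and the shutdown callback can be invoked while the clockevent device is already detached. A minimal sketch of the guarded pattern, using hypothetical demo_* names rather than the Atmel driver's real structures:

#include <linux/clk.h>
#include <linux/clockchips.h>
#include <linux/kernel.h>

/* Hypothetical clockevent wrapper; only the clk handle matters here. */
struct demo_clkevt {
	struct clock_event_device evt;
	struct clk *clk;
};

static int demo_shutdown(struct clock_event_device *d)
{
	struct demo_clkevt *dc = container_of(d, struct demo_clkevt, evt);

	/*
	 * Only drop the clock reference when the device is not already
	 * detached, so clk_enable()/clk_disable() stay balanced even if
	 * the state callback runs more than once.
	 */
	if (!clockevent_state_detached(d))
		clk_disable(dc->clk);
	return 0;
}
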
......@@ -153,7 +153,7 @@ static ktime_t timerfd_get_remaining(struct timerfd_ctx *ctx)
if (isalarm(ctx))
remaining = alarm_expires_remaining(&ctx->t.alarm);
else
remaining = hrtimer_expires_remaining(&ctx->t.tmr);
remaining = hrtimer_expires_remaining_adjusted(&ctx->t.tmr);
return remaining.tv64 < 0 ? ktime_set(0, 0): remaining;
}
......
......@@ -87,7 +87,8 @@ enum hrtimer_restart {
* @function: timer expiry callback function
* @base: pointer to the timer base (per cpu and per clock)
* @state: state information (See bit values above)
* @start_pid: timer statistics field to store the pid of the task which
* @is_rel: Set if the timer was armed relative
* @start_pid: timer statistics field to store the pid of the task which
* started the timer
* @start_site: timer statistics field to store the site where the timer
* was started
......@@ -101,7 +102,8 @@ struct hrtimer {
ktime_t _softexpires;
enum hrtimer_restart (*function)(struct hrtimer *);
struct hrtimer_clock_base *base;
unsigned long state;
u8 state;
u8 is_rel;
#ifdef CONFIG_TIMER_STATS
int start_pid;
void *start_site;
......@@ -321,6 +323,27 @@ static inline void clock_was_set_delayed(void) { }
#endif
static inline ktime_t
__hrtimer_expires_remaining_adjusted(const struct hrtimer *timer, ktime_t now)
{
ktime_t rem = ktime_sub(timer->node.expires, now);
/*
* Adjust relative timers for the extra we added in
* hrtimer_start_range_ns() to prevent short timeouts.
*/
if (IS_ENABLED(CONFIG_TIME_LOW_RES) && timer->is_rel)
rem.tv64 -= hrtimer_resolution;
return rem;
}
static inline ktime_t
hrtimer_expires_remaining_adjusted(const struct hrtimer *timer)
{
return __hrtimer_expires_remaining_adjusted(timer,
timer->base->get_time());
}
extern void clock_was_set(void);
#ifdef CONFIG_TIMERFD
extern void timerfd_clock_was_set(void);
......@@ -390,7 +413,12 @@ static inline void hrtimer_restart(struct hrtimer *timer)
}
/* Query timers: */
extern ktime_t hrtimer_get_remaining(const struct hrtimer *timer);
extern ktime_t __hrtimer_get_remaining(const struct hrtimer *timer, bool adjust);
static inline ktime_t hrtimer_get_remaining(const struct hrtimer *timer)
{
return __hrtimer_get_remaining(timer, false);
}
extern u64 hrtimer_get_next_event(void);
......
......@@ -897,10 +897,10 @@ static int enqueue_hrtimer(struct hrtimer *timer,
*/
static void __remove_hrtimer(struct hrtimer *timer,
struct hrtimer_clock_base *base,
unsigned long newstate, int reprogram)
u8 newstate, int reprogram)
{
struct hrtimer_cpu_base *cpu_base = base->cpu_base;
unsigned int state = timer->state;
u8 state = timer->state;
timer->state = newstate;
if (!(state & HRTIMER_STATE_ENQUEUED))
......@@ -930,7 +930,7 @@ static inline int
remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base, bool restart)
{
if (hrtimer_is_queued(timer)) {
unsigned long state = timer->state;
u8 state = timer->state;
int reprogram;
/*
......@@ -954,6 +954,22 @@ remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base, bool rest
return 0;
}
static inline ktime_t hrtimer_update_lowres(struct hrtimer *timer, ktime_t tim,
const enum hrtimer_mode mode)
{
#ifdef CONFIG_TIME_LOW_RES
/*
* CONFIG_TIME_LOW_RES indicates that the system has no way to return
* granular time values. For relative timers we add hrtimer_resolution
* (i.e. one jiffie) to prevent short timeouts.
*/
timer->is_rel = mode & HRTIMER_MODE_REL;
if (timer->is_rel)
tim = ktime_add_safe(tim, ktime_set(0, hrtimer_resolution));
#endif
return tim;
}
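
A quick worked example of how the two halves pair up, assuming CONFIG_TIME_LOW_RES with HZ=100 (so hrtimer_resolution is 10 ms); this is plain illustrative arithmetic, not kernel code:

#include <stdio.h>

int main(void)
{
	/* Assumed values: one jiffy at HZ=100, and a 5 ms relative request. */
	long long resolution_ns = 10000000LL;
	long long requested_ns  = 5000000LL;
	long long now_ns        = 0LL;

	/* Arming: hrtimer_update_lowres() pads relative timers by one jiffy. */
	long long expires_ns = now_ns + requested_ns + resolution_ns;

	/* Readout: hrtimer_expires_remaining_adjusted() subtracts the pad again. */
	long long remaining_ns = (expires_ns - now_ns) - resolution_ns;

	printf("reported remaining: %lld ns\n", remaining_ns); /* 5000000, not 15000000 */
	return 0;
}
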
/**
* hrtimer_start_range_ns - (re)start an hrtimer on the current CPU
* @timer: the timer to be added
......@@ -974,19 +990,10 @@ void hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
/* Remove an active timer from the queue: */
remove_hrtimer(timer, base, true);
if (mode & HRTIMER_MODE_REL) {
if (mode & HRTIMER_MODE_REL)
tim = ktime_add_safe(tim, base->get_time());
/*
* CONFIG_TIME_LOW_RES is a temporary way for architectures
* to signal that they simply return xtime in
* do_gettimeoffset(). In this case we want to round up by
* resolution when starting a relative timer, to avoid short
* timeouts. This will go away with the GTOD framework.
*/
#ifdef CONFIG_TIME_LOW_RES
tim = ktime_add_safe(tim, ktime_set(0, hrtimer_resolution));
#endif
}
tim = hrtimer_update_lowres(timer, tim, mode);
hrtimer_set_expires_range_ns(timer, tim, delta_ns);
......@@ -1074,19 +1081,23 @@ EXPORT_SYMBOL_GPL(hrtimer_cancel);
/**
* hrtimer_get_remaining - get remaining time for the timer
* @timer: the timer to read
* @adjust: adjust relative timers when CONFIG_TIME_LOW_RES=y
*/
ktime_t hrtimer_get_remaining(const struct hrtimer *timer)
ktime_t __hrtimer_get_remaining(const struct hrtimer *timer, bool adjust)
{
unsigned long flags;
ktime_t rem;
lock_hrtimer_base(timer, &flags);
rem = hrtimer_expires_remaining(timer);
if (IS_ENABLED(CONFIG_TIME_LOW_RES) && adjust)
rem = hrtimer_expires_remaining_adjusted(timer);
else
rem = hrtimer_expires_remaining(timer);
unlock_hrtimer_base(timer, &flags);
return rem;
}
EXPORT_SYMBOL_GPL(hrtimer_get_remaining);
EXPORT_SYMBOL_GPL(__hrtimer_get_remaining);
#ifdef CONFIG_NO_HZ_COMMON
/**
......@@ -1219,6 +1230,14 @@ static void __run_hrtimer(struct hrtimer_cpu_base *cpu_base,
timer_stats_account_hrtimer(timer);
fn = timer->function;
/*
* Clear the 'is relative' flag for the TIME_LOW_RES case. If the
* timer is restarted with a period then it becomes an absolute
* timer. If its not restarted it does not matter.
*/
if (IS_ENABLED(CONFIG_TIME_LOW_RES))
timer->is_rel = false;
/*
* Because we run timers from hardirq context, there is no chance
* they get migrated to another cpu, therefore its safe to unlock
......
......@@ -26,7 +26,7 @@
*/
static struct timeval itimer_get_remtime(struct hrtimer *timer)
{
ktime_t rem = hrtimer_get_remaining(timer);
ktime_t rem = __hrtimer_get_remaining(timer, true);
/*
* Racy but safe: if the itimer expires after the above
......
......@@ -685,8 +685,18 @@ int ntp_validate_timex(struct timex *txc)
if (!capable(CAP_SYS_TIME))
return -EPERM;
if (!timeval_inject_offset_valid(&txc->time))
return -EINVAL;
if (txc->modes & ADJ_NANO) {
struct timespec ts;
ts.tv_sec = txc->time.tv_sec;
ts.tv_nsec = txc->time.tv_usec;
if (!timespec_inject_offset_valid(&ts))
return -EINVAL;
} else {
if (!timeval_inject_offset_valid(&txc->time))
return -EINVAL;
}
}
/*
......
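
Put differently, with ADJ_NANO set the offset's tv_usec field carries nanoseconds, so the valid range becomes [0, NSEC_PER_SEC) instead of [0, USEC_PER_SEC). A small user-space sketch of a request that is only accepted under the nanosecond interpretation (needs CAP_SYS_TIME to actually apply; the half-second value is an arbitrary illustration):

#include <stdio.h>
#include <sys/timex.h>

#ifndef ADJ_SETOFFSET
#define ADJ_SETOFFSET 0x0100	/* same fallback the selftest below uses */
#endif

int main(void)
{
	struct timex tx = { 0 };

	tx.modes = ADJ_SETOFFSET | ADJ_NANO;
	tx.time.tv_sec = 0;
	tx.time.tv_usec = 500000000;	/* 0.5 s, expressed in nanoseconds */

	/* Without the fix this was rejected with EINVAL, because 500000000
	 * exceeds USEC_PER_SEC; with ADJ_NANO it is a perfectly valid offset. */
	if (adjtimex(&tx) < 0)
		perror("adjtimex");
	return 0;
}
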
......@@ -760,7 +760,7 @@ common_timer_get(struct k_itimer *timr, struct itimerspec *cur_setting)
(timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE))
timr->it_overrun += (unsigned int) hrtimer_forward(timer, now, iv);
remaining = ktime_sub(hrtimer_get_expires(timer), now);
remaining = __hrtimer_expires_remaining_adjusted(timer, now);
/* Return 0 only, when the timer is expired and not pending */
if (remaining.tv64 <= 0) {
/*
......
......@@ -36,16 +36,17 @@
*/
static DEFINE_PER_CPU(struct tick_sched, tick_cpu_sched);
/*
* The time, when the last jiffy update happened. Protected by jiffies_lock.
*/
static ktime_t last_jiffies_update;
struct tick_sched *tick_get_tick_sched(int cpu)
{
return &per_cpu(tick_cpu_sched, cpu);
}
#if defined(CONFIG_NO_HZ_COMMON) || defined(CONFIG_HIGH_RES_TIMERS)
/*
* The time, when the last jiffy update happened. Protected by jiffies_lock.
*/
static ktime_t last_jiffies_update;
/*
* Must be called with interrupts disabled !
*/
......@@ -151,6 +152,7 @@ static void tick_sched_handle(struct tick_sched *ts, struct pt_regs *regs)
update_process_times(user_mode(regs));
profile_tick(CPU_PROFILING);
}
#endif
#ifdef CONFIG_NO_HZ_FULL
cpumask_var_t tick_nohz_full_mask;
......@@ -993,9 +995,9 @@ static void tick_nohz_switch_to_nohz(void)
/* Get the next period */
next = tick_init_jiffy_update();
hrtimer_forward_now(&ts->sched_timer, tick_period);
hrtimer_set_expires(&ts->sched_timer, next);
tick_program_event(next, 1);
hrtimer_forward_now(&ts->sched_timer, tick_period);
tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1);
tick_nohz_activate(ts, NOHZ_MODE_LOWRES);
}
......
......@@ -69,7 +69,7 @@ print_timer(struct seq_file *m, struct hrtimer *taddr, struct hrtimer *timer,
print_name_offset(m, taddr);
SEQ_printf(m, ", ");
print_name_offset(m, timer->function);
SEQ_printf(m, ", S:%02lx", timer->state);
SEQ_printf(m, ", S:%02x", timer->state);
#ifdef CONFIG_TIMER_STATS
SEQ_printf(m, ", ");
print_name_offset(m, timer->start_site);
......
......@@ -45,7 +45,17 @@ static inline int ksft_exit_fail(void)
}
#endif
#define NSEC_PER_SEC 1000000000L
#define NSEC_PER_SEC 1000000000LL
#define USEC_PER_SEC 1000000LL
#define ADJ_SETOFFSET 0x0100
#include <sys/syscall.h>
static int clock_adjtime(clockid_t id, struct timex *tx)
{
return syscall(__NR_clock_adjtime, id, tx);
}
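
As a side note on the wrapper just added: it goes through syscall(2) directly, presumably because the C library in use does not provide a clock_adjtime() wrapper. A usage sketch, assuming the includes and the wrapper above (not part of the selftest itself):

/* modes == 0 is a read-only query: it changes nothing, needs no privileges,
 * and on success returns the current clock state (TIME_OK, TIME_INS, ...). */
static int query_clock_state(void)
{
	struct timex tx = {};

	return clock_adjtime(CLOCK_REALTIME, &tx);
}
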
/* clear NTP time_status & time_state */
int clear_time_state(void)
......@@ -193,10 +203,137 @@ int validate_freq(void)
}
int set_offset(long long offset, int use_nano)
{
struct timex tmx = {};
int ret;
tmx.modes = ADJ_SETOFFSET;
if (use_nano) {
tmx.modes |= ADJ_NANO;
tmx.time.tv_sec = offset / NSEC_PER_SEC;
tmx.time.tv_usec = offset % NSEC_PER_SEC;
if (offset < 0 && tmx.time.tv_usec) {
tmx.time.tv_sec -= 1;
tmx.time.tv_usec += NSEC_PER_SEC;
}
} else {
tmx.time.tv_sec = offset / USEC_PER_SEC;
tmx.time.tv_usec = offset % USEC_PER_SEC;
if (offset < 0 && tmx.time.tv_usec) {
tmx.time.tv_sec -= 1;
tmx.time.tv_usec += USEC_PER_SEC;
}
}
ret = clock_adjtime(CLOCK_REALTIME, &tmx);
if (ret < 0) {
printf("(sec: %ld usec: %ld) ", tmx.time.tv_sec, tmx.time.tv_usec);
printf("[FAIL]\n");
return -1;
}
return 0;
}
int set_bad_offset(long sec, long usec, int use_nano)
{
struct timex tmx = {};
int ret;
tmx.modes = ADJ_SETOFFSET;
if (use_nano)
tmx.modes |= ADJ_NANO;
tmx.time.tv_sec = sec;
tmx.time.tv_usec = usec;
ret = clock_adjtime(CLOCK_REALTIME, &tmx);
if (ret >= 0) {
printf("Invalid (sec: %ld usec: %ld) did not fail! ", tmx.time.tv_sec, tmx.time.tv_usec);
printf("[FAIL]\n");
return -1;
}
return 0;
}
int validate_set_offset(void)
{
printf("Testing ADJ_SETOFFSET... ");
/* Test valid values */
if (set_offset(NSEC_PER_SEC - 1, 1))
return -1;
if (set_offset(-NSEC_PER_SEC + 1, 1))
return -1;
if (set_offset(-NSEC_PER_SEC - 1, 1))
return -1;
if (set_offset(5 * NSEC_PER_SEC, 1))
return -1;
if (set_offset(-5 * NSEC_PER_SEC, 1))
return -1;
if (set_offset(5 * NSEC_PER_SEC + NSEC_PER_SEC / 2, 1))
return -1;
if (set_offset(-5 * NSEC_PER_SEC - NSEC_PER_SEC / 2, 1))
return -1;
if (set_offset(USEC_PER_SEC - 1, 0))
return -1;
if (set_offset(-USEC_PER_SEC + 1, 0))
return -1;
if (set_offset(-USEC_PER_SEC - 1, 0))
return -1;
if (set_offset(5 * USEC_PER_SEC, 0))
return -1;
if (set_offset(-5 * USEC_PER_SEC, 0))
return -1;
if (set_offset(5 * USEC_PER_SEC + USEC_PER_SEC / 2, 0))
return -1;
if (set_offset(-5 * USEC_PER_SEC - USEC_PER_SEC / 2, 0))
return -1;
/* Test invalid values */
if (set_bad_offset(0, -1, 1))
return -1;
if (set_bad_offset(0, -1, 0))
return -1;
if (set_bad_offset(0, 2 * NSEC_PER_SEC, 1))
return -1;
if (set_bad_offset(0, 2 * USEC_PER_SEC, 0))
return -1;
if (set_bad_offset(0, NSEC_PER_SEC, 1))
return -1;
if (set_bad_offset(0, USEC_PER_SEC, 0))
return -1;
if (set_bad_offset(0, -NSEC_PER_SEC, 1))
return -1;
if (set_bad_offset(0, -USEC_PER_SEC, 0))
return -1;
printf("[OK]\n");
return 0;
}
int main(int argc, char **argv)
{
if (validate_freq())
return ksft_exit_fail();
if (validate_set_offset())
return ksft_exit_fail();
return ksft_exit_pass();
}