Commit a934a56e authored by Ingo Molnar

Merge branch 'timers/core-v2' of git://git.kernel.org/pub/scm/linux/kernel/git/frederic/linux-dynticks into timers/core

Pull dynticks updates from Frederic Weisbecker:

  * Fix a bug where posix cpu timers requeued due to their interval were ignored
    on full dynticks CPUs (not a regression, though, as it only affects full
    dynticks and the bug has been there since full dynticks was merged).

  * Optimizations and cleanups in the use of per-CPU APIs to improve code
    readability, performance and debuggability in the nohz subsystem.

  * Optimize posix cpu timers to avoid queueing a stub workqueue item when full
    dynticks is off (abridged just before the diff below).

  * Rename some functions with a *_this_cpu() suffix for clarity.

  * Refine the naming of some context tracking subsystem state accessors
    (see the short sketch after this list).

  * Trivial spelling fix from Paul Gortmaker.
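
  The renamed accessors split the enablement check in two levels:
  context_tracking_is_enabled() tests the global context_tracking_enabled
  static key that call sites used to open-code as
  static_key_false(&context_tracking_enabled), while
  context_tracking_cpu_is_enabled() (formerly context_tracking_active())
  tests the current CPU's context_tracking.active flag. A minimal sketch of
  how a caller composes the two, paraphrased from the vtime_accounting_enabled()
  hunk in the diff below (not a new API):

      static inline bool vtime_accounting_enabled(void)
      {
              /* Global fast path: the static key is patched out when tracking is unused */
              if (context_tracking_is_enabled()) {
                      /* Per-CPU check: is context tracking active on this CPU? */
                      if (context_tracking_cpu_is_enabled())
                              return true;
              }
              return false;
      }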
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parents dea4f48a c925077c
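
  The main functional change in the diff below is in the posix cpu timers code:
  posix_cpu_timer_kick_nohz() now queues its work only when context tracking is
  enabled (which full dynticks requires), and the nohz kick is issued from
  posix_cpu_timer_schedule() right after rearming an interval timer instead of
  from run_posix_cpu_timers(), so requeued interval timers are no longer ignored
  on full dynticks CPUs. Abridged from the hunks below:

      static void posix_cpu_timer_kick_nohz(void)
      {
              /* Spare the stub workqueue queueing when full dynticks is off */
              if (context_tracking_is_enabled())
                      schedule_work(&nohz_kick_work);
      }

      /* In posix_cpu_timer_schedule(), after arm_timer(timer): */
      spin_unlock(&p->sighand->siglock);
      read_unlock(&tasklist_lock);
      /* Kick full dynticks CPUs in case they need to tick on the new timer */
      posix_cpu_timer_kick_nohz();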
@@ -17,13 +17,13 @@ extern void __context_tracking_task_switch(struct task_struct *prev,
 static inline void user_enter(void)
 {
-        if (static_key_false(&context_tracking_enabled))
+        if (context_tracking_is_enabled())
                 context_tracking_user_enter();
 }
 static inline void user_exit(void)
 {
-        if (static_key_false(&context_tracking_enabled))
+        if (context_tracking_is_enabled())
                 context_tracking_user_exit();
 }
@@ -31,7 +31,7 @@ static inline enum ctx_state exception_enter(void)
 {
         enum ctx_state prev_ctx;
-        if (!static_key_false(&context_tracking_enabled))
+        if (!context_tracking_is_enabled())
                 return 0;
         prev_ctx = this_cpu_read(context_tracking.state);
@@ -42,7 +42,7 @@ static inline enum ctx_state exception_enter(void)
 static inline void exception_exit(enum ctx_state prev_ctx)
 {
-        if (static_key_false(&context_tracking_enabled)) {
+        if (context_tracking_is_enabled()) {
                 if (prev_ctx == IN_USER)
                         context_tracking_user_enter();
         }
@@ -51,7 +51,7 @@ static inline void exception_exit(enum ctx_state prev_ctx)
 static inline void context_tracking_task_switch(struct task_struct *prev,
                                                 struct task_struct *next)
 {
-        if (static_key_false(&context_tracking_enabled))
+        if (context_tracking_is_enabled())
                 __context_tracking_task_switch(prev, next);
 }
 #else
...
@@ -22,15 +22,20 @@ struct context_tracking {
 extern struct static_key context_tracking_enabled;
 DECLARE_PER_CPU(struct context_tracking, context_tracking);
-static inline bool context_tracking_in_user(void)
+static inline bool context_tracking_is_enabled(void)
 {
-        return __this_cpu_read(context_tracking.state) == IN_USER;
+        return static_key_false(&context_tracking_enabled);
 }
-static inline bool context_tracking_active(void)
+static inline bool context_tracking_cpu_is_enabled(void)
 {
         return __this_cpu_read(context_tracking.active);
 }
+static inline bool context_tracking_in_user(void)
+{
+        return __this_cpu_read(context_tracking.state) == IN_USER;
+}
 #else
 static inline bool context_tracking_in_user(void) { return false; }
 static inline bool context_tracking_active(void) { return false; }
...
@@ -104,7 +104,7 @@ extern struct cpumask *tick_get_broadcast_oneshot_mask(void);
 extern void tick_clock_notify(void);
 extern int tick_check_oneshot_change(int allow_nohz);
 extern struct tick_sched *tick_get_tick_sched(int cpu);
-extern void tick_check_idle(int cpu);
+extern void tick_check_idle(void);
 extern int tick_oneshot_mode_active(void);
 # ifndef arch_needs_cpu
 # define arch_needs_cpu(cpu) (0)
@@ -112,7 +112,7 @@ extern int tick_oneshot_mode_active(void);
 # else
 static inline void tick_clock_notify(void) { }
 static inline int tick_check_oneshot_change(int allow_nohz) { return 0; }
-static inline void tick_check_idle(int cpu) { }
+static inline void tick_check_idle(void) { }
 static inline int tick_oneshot_mode_active(void) { return 0; }
 # endif
@@ -121,7 +121,7 @@ static inline void tick_init(void) { }
 static inline void tick_cancel_sched_timer(int cpu) { }
 static inline void tick_clock_notify(void) { }
 static inline int tick_check_oneshot_change(int allow_nohz) { return 0; }
-static inline void tick_check_idle(int cpu) { }
+static inline void tick_check_idle(void) { }
 static inline int tick_oneshot_mode_active(void) { return 0; }
 #endif /* !CONFIG_GENERIC_CLOCKEVENTS */
@@ -165,7 +165,7 @@ extern cpumask_var_t tick_nohz_full_mask;
 static inline bool tick_nohz_full_enabled(void)
 {
-        if (!static_key_false(&context_tracking_enabled))
+        if (!context_tracking_is_enabled())
                 return false;
         return tick_nohz_full_running;
...
@@ -19,8 +19,8 @@ static inline bool vtime_accounting_enabled(void) { return true; }
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
 static inline bool vtime_accounting_enabled(void)
 {
-        if (static_key_false(&context_tracking_enabled)) {
-                if (context_tracking_active())
+        if (context_tracking_is_enabled()) {
+                if (context_tracking_cpu_is_enabled())
                         return true;
         }
...
@@ -532,7 +532,7 @@ config CONTEXT_TRACKING_FORCE
           dynticks subsystem by forcing the context tracking on all
           CPUs in the system.
-          Say Y only if you're working on the developpement of an
+          Say Y only if you're working on the development of an
           architecture backend for the context tracking.
           Say N otherwise, this option brings an overhead that you
...
@@ -53,10 +53,10 @@ void context_tracking_user_enter(void)
         /*
          * Repeat the user_enter() check here because some archs may be calling
          * this from asm and if no CPU needs context tracking, they shouldn't
-         * go further. Repeat the check here until they support the static key
-         * check.
+         * go further. Repeat the check here until they support the inline static
+         * key check.
          */
-        if (!static_key_false(&context_tracking_enabled))
+        if (!context_tracking_is_enabled())
                 return;
         /*
@@ -160,7 +160,7 @@ void context_tracking_user_exit(void)
 {
         unsigned long flags;
-        if (!static_key_false(&context_tracking_enabled))
+        if (!context_tracking_is_enabled())
                 return;
         if (in_interrupt())
...
@@ -608,7 +608,8 @@ static DECLARE_WORK(nohz_kick_work, nohz_kick_work_fn);
  */
 static void posix_cpu_timer_kick_nohz(void)
 {
-        schedule_work(&nohz_kick_work);
+        if (context_tracking_is_enabled())
+                schedule_work(&nohz_kick_work);
 }
 bool posix_cpu_timers_can_stop_tick(struct task_struct *tsk)
@@ -1090,7 +1091,8 @@ void posix_cpu_timer_schedule(struct k_itimer *timer)
                 put_task_struct(p);
                 timer->it.cpu.task = p = NULL;
                 timer->it.cpu.expires = 0;
-                goto out_unlock;
+                read_unlock(&tasklist_lock);
+                goto out;
         } else if (unlikely(p->exit_state) && thread_group_empty(p)) {
                 /*
                  * We've noticed that the thread is dead, but
@@ -1099,7 +1101,8 @@ void posix_cpu_timer_schedule(struct k_itimer *timer)
                  */
                 cpu_timer_sample_group(timer->it_clock, p, &now);
                 clear_dead_task(timer, now);
-                goto out_unlock;
+                read_unlock(&tasklist_lock);
+                goto out;
         }
         spin_lock(&p->sighand->siglock);
         cpu_timer_sample_group(timer->it_clock, p, &now);
@@ -1113,10 +1116,11 @@ void posix_cpu_timer_schedule(struct k_itimer *timer)
         BUG_ON(!irqs_disabled());
         arm_timer(timer);
         spin_unlock(&p->sighand->siglock);
-out_unlock:
         read_unlock(&tasklist_lock);
+        /* Kick full dynticks CPUs in case they need to tick on the new timer */
+        posix_cpu_timer_kick_nohz();
 out:
         timer->it_overrun_last = timer->it_overrun;
         timer->it_overrun = -1;
@@ -1256,13 +1260,6 @@ void run_posix_cpu_timers(struct task_struct *tsk)
                 cpu_timer_fire(timer);
                 spin_unlock(&timer->it_lock);
         }
-        /*
-         * In case some timers were rescheduled after the queue got emptied,
-         * wake up full dynticks CPUs.
-         */
-        if (tsk->signal->cputimer.running)
-                posix_cpu_timer_kick_nohz();
 }
 /*
...
@@ -311,8 +311,6 @@ asmlinkage void do_softirq(void)
  */
 void irq_enter(void)
 {
-        int cpu = smp_processor_id();
         rcu_irq_enter();
         if (is_idle_task(current) && !in_interrupt()) {
                 /*
@@ -320,7 +318,7 @@ void irq_enter(void)
                  * here, as softirq will be serviced on return from interrupt.
                  */
                 local_bh_disable();
-                tick_check_idle(cpu);
+                tick_check_idle();
                 _local_bh_enable();
         }
...
@@ -538,10 +538,10 @@ int tick_resume_broadcast_oneshot(struct clock_event_device *bc)
  * Called from irq_enter() when idle was interrupted to reenable the
  * per cpu device.
  */
-void tick_check_oneshot_broadcast(int cpu)
+void tick_check_oneshot_broadcast_this_cpu(void)
 {
-        if (cpumask_test_cpu(cpu, tick_broadcast_oneshot_mask)) {
-                struct tick_device *td = &per_cpu(tick_cpu_device, cpu);
+        if (cpumask_test_cpu(smp_processor_id(), tick_broadcast_oneshot_mask)) {
+                struct tick_device *td = &__get_cpu_var(tick_cpu_device);
                 /*
                  * We might be in the middle of switching over from
...
@@ -51,7 +51,7 @@ extern void tick_broadcast_switch_to_oneshot(void);
 extern void tick_shutdown_broadcast_oneshot(unsigned int *cpup);
 extern int tick_resume_broadcast_oneshot(struct clock_event_device *bc);
 extern int tick_broadcast_oneshot_active(void);
-extern void tick_check_oneshot_broadcast(int cpu);
+extern void tick_check_oneshot_broadcast_this_cpu(void);
 bool tick_broadcast_oneshot_available(void);
 # else /* BROADCAST */
 static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
@@ -62,7 +62,7 @@ static inline void tick_broadcast_oneshot_control(unsigned long reason) { }
 static inline void tick_broadcast_switch_to_oneshot(void) { }
 static inline void tick_shutdown_broadcast_oneshot(unsigned int *cpup) { }
 static inline int tick_broadcast_oneshot_active(void) { return 0; }
-static inline void tick_check_oneshot_broadcast(int cpu) { }
+static inline void tick_check_oneshot_broadcast_this_cpu(void) { }
 static inline bool tick_broadcast_oneshot_available(void) { return true; }
 # endif /* !BROADCAST */
...
@@ -391,11 +391,9 @@ __setup("nohz=", setup_tick_nohz);
  */
 static void tick_nohz_update_jiffies(ktime_t now)
 {
-        int cpu = smp_processor_id();
-        struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
         unsigned long flags;
-        ts->idle_waketime = now;
+        __this_cpu_write(tick_cpu_sched.idle_waketime, now);
         local_irq_save(flags);
         tick_do_update_jiffies64(now);
@@ -426,17 +424,15 @@ update_ts_time_stats(int cpu, struct tick_sched *ts, ktime_t now, u64 *last_upda
 }
-static void tick_nohz_stop_idle(int cpu, ktime_t now)
+static void tick_nohz_stop_idle(struct tick_sched *ts, ktime_t now)
 {
-        struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
-        update_ts_time_stats(cpu, ts, now, NULL);
+        update_ts_time_stats(smp_processor_id(), ts, now, NULL);
         ts->idle_active = 0;
         sched_clock_idle_wakeup_event(0);
 }
-static ktime_t tick_nohz_start_idle(int cpu, struct tick_sched *ts)
+static ktime_t tick_nohz_start_idle(struct tick_sched *ts)
 {
         ktime_t now = ktime_get();
@@ -752,7 +748,7 @@ static void __tick_nohz_idle_enter(struct tick_sched *ts)
         ktime_t now, expires;
         int cpu = smp_processor_id();
-        now = tick_nohz_start_idle(cpu, ts);
+        now = tick_nohz_start_idle(ts);
         if (can_stop_idle_tick(cpu, ts)) {
                 int was_stopped = ts->tick_stopped;
@@ -914,8 +910,7 @@ static void tick_nohz_account_idle_ticks(struct tick_sched *ts)
  */
 void tick_nohz_idle_exit(void)
 {
-        int cpu = smp_processor_id();
-        struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
+        struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
         ktime_t now;
         local_irq_disable();
@@ -928,7 +923,7 @@ void tick_nohz_idle_exit(void)
         now = ktime_get();
         if (ts->idle_active)
-                tick_nohz_stop_idle(cpu, now);
+                tick_nohz_stop_idle(ts, now);
         if (ts->tick_stopped) {
                 tick_nohz_restart_sched_tick(ts, now);
@@ -1012,12 +1007,10 @@ static void tick_nohz_switch_to_nohz(void)
  * timer and do not touch the other magic bits which need to be done
  * when idle is left.
  */
-static void tick_nohz_kick_tick(int cpu, ktime_t now)
+static void tick_nohz_kick_tick(struct tick_sched *ts, ktime_t now)
 {
 #if 0
         /* Switch back to 2.6.27 behaviour */
-        struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
         ktime_t delta;
         /*
@@ -1032,36 +1025,36 @@ static void tick_nohz_kick_tick(int cpu, ktime_t now)
 #endif
 }
-static inline void tick_check_nohz(int cpu)
+static inline void tick_check_nohz_this_cpu(void)
 {
-        struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
+        struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
         ktime_t now;
         if (!ts->idle_active && !ts->tick_stopped)
                 return;
         now = ktime_get();
         if (ts->idle_active)
-                tick_nohz_stop_idle(cpu, now);
+                tick_nohz_stop_idle(ts, now);
         if (ts->tick_stopped) {
                 tick_nohz_update_jiffies(now);
-                tick_nohz_kick_tick(cpu, now);
+                tick_nohz_kick_tick(ts, now);
         }
 }
 #else
 static inline void tick_nohz_switch_to_nohz(void) { }
-static inline void tick_check_nohz(int cpu) { }
+static inline void tick_check_nohz_this_cpu(void) { }
 #endif /* CONFIG_NO_HZ_COMMON */
 /*
  * Called from irq_enter to notify about the possible interruption of idle()
  */
-void tick_check_idle(int cpu)
+void tick_check_idle(void)
 {
-        tick_check_oneshot_broadcast(cpu);
-        tick_check_nohz(cpu);
+        tick_check_oneshot_broadcast_this_cpu();
+        tick_check_nohz_this_cpu();
 }
 /*
...