Commit 4db88eb4 authored by Linus Torvalds

Merge branch 'timers-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull timer fixes from Thomas Gleixner:
 - Fix inconsistent clock usage in virtual time accounting
 - Fix a build error in KVM caused by the NOHZ work
 - Remove a pointless timekeeping duty assignment which breaks NOHZ
 - Use a proper notifier return value to avoid random behaviour

* 'timers-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  tick: Remove useless timekeeping duty attribution to broadcast source
  nohz: Fix notifier return val that enforce timekeeping
  kvm: Move guest entry/exit APIs to context_tracking
  vtime: Use consistent clocks among nohz accounting
parents 53d5defc f5d00c1f
@@ -3,6 +3,7 @@
 #include <linux/sched.h>
 #include <linux/percpu.h>
+#include <linux/vtime.h>
 #include <asm/ptrace.h>
 struct context_tracking {
@@ -19,6 +20,26 @@ struct context_tracking {
 	} state;
 };
+static inline void __guest_enter(void)
+{
+	/*
+	 * This is running in ioctl context so we can avoid
+	 * the call to vtime_account() with its unnecessary idle check.
+	 */
+	vtime_account_system(current);
+	current->flags |= PF_VCPU;
+}
+static inline void __guest_exit(void)
+{
+	/*
+	 * This is running in ioctl context so we can avoid
+	 * the call to vtime_account() with its unnecessary idle check.
+	 */
+	vtime_account_system(current);
+	current->flags &= ~PF_VCPU;
+}
 #ifdef CONFIG_CONTEXT_TRACKING
 DECLARE_PER_CPU(struct context_tracking, context_tracking);
@@ -35,6 +56,9 @@ static inline bool context_tracking_active(void)
 extern void user_enter(void);
 extern void user_exit(void);
+extern void guest_enter(void);
+extern void guest_exit(void);
 static inline enum ctx_state exception_enter(void)
 {
 	enum ctx_state prev_ctx;
@@ -57,6 +81,17 @@ extern void context_tracking_task_switch(struct task_struct *prev,
 static inline bool context_tracking_in_user(void) { return false; }
 static inline void user_enter(void) { }
 static inline void user_exit(void) { }
+static inline void guest_enter(void)
+{
+	__guest_enter();
+}
+static inline void guest_exit(void)
+{
+	__guest_exit();
+}
 static inline enum ctx_state exception_enter(void) { return 0; }
 static inline void exception_exit(enum ctx_state prev_ctx) { }
 static inline void context_tracking_task_switch(struct task_struct *prev,
......
@@ -23,6 +23,7 @@
 #include <linux/ratelimit.h>
 #include <linux/err.h>
 #include <linux/irqflags.h>
+#include <linux/context_tracking.h>
 #include <asm/signal.h>
 #include <linux/kvm.h>
@@ -760,42 +761,6 @@ static inline int kvm_iommu_unmap_guest(struct kvm *kvm)
 }
 #endif
-static inline void __guest_enter(void)
-{
-	/*
-	 * This is running in ioctl context so we can avoid
-	 * the call to vtime_account() with its unnecessary idle check.
-	 */
-	vtime_account_system(current);
-	current->flags |= PF_VCPU;
-}
-static inline void __guest_exit(void)
-{
-	/*
-	 * This is running in ioctl context so we can avoid
-	 * the call to vtime_account() with its unnecessary idle check.
-	 */
-	vtime_account_system(current);
-	current->flags &= ~PF_VCPU;
-}
-#ifdef CONFIG_CONTEXT_TRACKING
-extern void guest_enter(void);
-extern void guest_exit(void);
-#else /* !CONFIG_CONTEXT_TRACKING */
-static inline void guest_enter(void)
-{
-	__guest_enter();
-}
-static inline void guest_exit(void)
-{
-	__guest_exit();
-}
-#endif /* !CONFIG_CONTEXT_TRACKING */
 static inline void kvm_guest_enter(void)
 {
 	unsigned long flags;
......
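The hunk above stops at the opening of kvm_guest_enter(). The following is a hypothetical sketch, not part of this commit, of how an ioctl-context caller is expected to bracket guest execution with the guest_enter()/guest_exit() hooks that now live in <linux/context_tracking.h>; run_vcpu_once() is an invented stand-in for the arch-specific vcpu-run code, and the irq-save pattern merely mirrors the `unsigned long flags;` hint visible in the kvm_guest_enter() stub above.

/*
 * Hypothetical sketch, not from this commit: bracketing guest execution
 * with the relocated guest_enter()/guest_exit() accounting hooks.
 */
#include <linux/context_tracking.h>
#include <linux/irqflags.h>

static int run_vcpu_once(void);		/* invented stand-in for arch vcpu-run code */

static int vcpu_run_sketch(void)
{
	unsigned long flags;
	int ret;

	local_irq_save(flags);
	guest_enter();			/* subsequent time is accounted as guest time */
	local_irq_restore(flags);

	ret = run_vcpu_once();

	local_irq_save(flags);
	guest_exit();			/* back to normal host/system accounting */
	local_irq_restore(flags);

	return ret;
}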
@@ -34,7 +34,7 @@ static inline void vtime_user_exit(struct task_struct *tsk)
 }
 extern void vtime_guest_enter(struct task_struct *tsk);
 extern void vtime_guest_exit(struct task_struct *tsk);
-extern void vtime_init_idle(struct task_struct *tsk);
+extern void vtime_init_idle(struct task_struct *tsk, int cpu);
 #else
 static inline void vtime_account_irq_exit(struct task_struct *tsk)
 {
@@ -45,7 +45,7 @@ static inline void vtime_user_enter(struct task_struct *tsk) { }
 static inline void vtime_user_exit(struct task_struct *tsk) { }
 static inline void vtime_guest_enter(struct task_struct *tsk) { }
 static inline void vtime_guest_exit(struct task_struct *tsk) { }
-static inline void vtime_init_idle(struct task_struct *tsk) { }
+static inline void vtime_init_idle(struct task_struct *tsk, int cpu) { }
 #endif
 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
......
@@ -15,7 +15,6 @@
  */
 #include <linux/context_tracking.h>
-#include <linux/kvm_host.h>
 #include <linux/rcupdate.h>
 #include <linux/sched.h>
 #include <linux/hardirq.h>
......
@@ -4745,7 +4745,7 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
 	 */
 	idle->sched_class = &idle_sched_class;
 	ftrace_graph_init_idle_task(idle, cpu);
-	vtime_init_idle(idle);
+	vtime_init_idle(idle, cpu);
 #if defined(CONFIG_SMP)
 	sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu);
 #endif
......
@@ -747,17 +747,17 @@ void arch_vtime_task_switch(struct task_struct *prev)
 	write_seqlock(&current->vtime_seqlock);
 	current->vtime_snap_whence = VTIME_SYS;
-	current->vtime_snap = sched_clock();
+	current->vtime_snap = sched_clock_cpu(smp_processor_id());
 	write_sequnlock(&current->vtime_seqlock);
 }
-void vtime_init_idle(struct task_struct *t)
+void vtime_init_idle(struct task_struct *t, int cpu)
 {
 	unsigned long flags;
 	write_seqlock_irqsave(&t->vtime_seqlock, flags);
 	t->vtime_snap_whence = VTIME_SYS;
-	t->vtime_snap = sched_clock();
+	t->vtime_snap = sched_clock_cpu(cpu);
 	write_sequnlock_irqrestore(&t->vtime_seqlock, flags);
 }
......
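The two sched_clock_cpu() conversions above are the "inconsistent clock usage" fix from the pull summary: the value written to vtime_snap and the value it is later subtracted from must come from the same clock, otherwise the accounted delta is skewed whenever the two clocks drift apart. A minimal sketch of that invariant, illustrative only and not code from this commit:

#include <linux/sched.h>
#include <linux/smp.h>

/* Illustrative only: the delta side must read the same per-cpu clock
 * that the vtime_snap writers above use. */
static u64 vtime_delta_sketch(struct task_struct *tsk)
{
	u64 now = sched_clock_cpu(smp_processor_id());

	return now - tsk->vtime_snap;
}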
@@ -698,10 +698,6 @@ void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
 	bc->event_handler = tick_handle_oneshot_broadcast;
-	/* Take the do_timer update */
-	if (!tick_nohz_full_cpu(cpu))
-		tick_do_timer_cpu = cpu;
 	/*
 	 * We must be careful here. There might be other CPUs
 	 * waiting for periodic broadcast. We need to set the
......
@@ -306,7 +306,7 @@ static int __cpuinit tick_nohz_cpu_down_callback(struct notifier_block *nfb,
 		 * we can't safely shutdown that CPU.
 		 */
 		if (have_nohz_full_mask && tick_do_timer_cpu == cpu)
-			return -EINVAL;
+			return NOTIFY_BAD;
 		break;
 	}
 	return NOTIFY_OK;
......
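The one-line change above is the "proper notifier return value" fix: CPU hotplug notifier callbacks are expected to answer with NOTIFY_* codes, and handing back a raw errno such as -EINVAL is not a well-formed answer, which is what the pull summary's "random behaviour" refers to (notifier_from_errno() exists for the cases where an errno needs to be propagated). A hypothetical sketch of the convention; cpu_must_stay_online() is an invented predicate, not a kernel API:

#include <linux/cpu.h>
#include <linux/notifier.h>

static bool cpu_must_stay_online(unsigned int cpu);	/* invented predicate */

static int example_cpu_notifier(struct notifier_block *nb,
				unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_DOWN_PREPARE:
		if (cpu_must_stay_online(cpu))
			return NOTIFY_BAD;	/* vetoes the CPU-down operation */
		break;
	}
	return NOTIFY_OK;
}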