Commit bac5b6b6 authored by Frederic Weisbecker's avatar Frederic Weisbecker Committed by Ingo Molnar

sched/cputime: Move the vtime task fields to their own struct

We are about to add vtime accumulation fields to the task struct. To
avoid bloating it further, let's gather the vtime information into its
own struct.
Tested-by: Luiz Capitulino <lcapitulino@redhat.com>
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Rik van Riel <riel@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Wanpeng Li <kernellwp@gmail.com>
Link: http://lkml.kernel.org/r/1498756511-11714-5-git-send-email-fweisbec@gmail.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 60a9ce57
...@@ -170,9 +170,9 @@ extern struct cred init_cred; ...@@ -170,9 +170,9 @@ extern struct cred init_cred;
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
# define INIT_VTIME(tsk) \ # define INIT_VTIME(tsk) \
.vtime_seqcount = SEQCNT_ZERO(tsk.vtime_seqcount), \ .vtime.seqcount = SEQCNT_ZERO(tsk.vtime.seqcount), \
.vtime_starttime = 0, \ .vtime.starttime = 0, \
.vtime_state = VTIME_SYS, .vtime.state = VTIME_SYS,
#else #else
# define INIT_VTIME(tsk) # define INIT_VTIME(tsk)
#endif #endif
......
...@@ -223,6 +223,21 @@ struct task_cputime { ...@@ -223,6 +223,21 @@ struct task_cputime {
#define prof_exp stime #define prof_exp stime
#define sched_exp sum_exec_runtime #define sched_exp sum_exec_runtime
/*
 * Per-task state of the generic (CONFIG_VIRT_CPU_ACCOUNTING_GEN) vtime
 * accounting machinery.  Updated under the vtime seqcount write side on
 * user/kernel transitions and task switch.
 */
enum vtime_state {
	/* Task is sleeping or running in a CPU with VTIME inactive: */
	VTIME_INACTIVE = 0,
	/* Task runs in userspace in a CPU with VTIME active: */
	VTIME_USER,
	/* Task runs in kernelspace in a CPU with VTIME active: */
	VTIME_SYS,
};
/*
 * Gathered vtime accounting fields for a task (previously the scattered
 * task_struct::vtime_* fields).  Writers update @state and @starttime
 * inside write_seqcount_begin()/end() pairs; lockless readers such as
 * task_gtime() and task_cputime() retry on @seqcount to obtain a
 * consistent snapshot.
 */
struct vtime {
	seqcount_t		seqcount;	/* Protects @state and @starttime against readers */
	unsigned long long	starttime;	/* Start of the current accounting period, in jiffies */
	enum vtime_state	state;		/* Where the task is currently accounted (user/sys/inactive) */
};
struct sched_info { struct sched_info {
#ifdef CONFIG_SCHED_INFO #ifdef CONFIG_SCHED_INFO
/* Cumulative counters: */ /* Cumulative counters: */
...@@ -688,16 +703,7 @@ struct task_struct { ...@@ -688,16 +703,7 @@ struct task_struct {
u64 gtime; u64 gtime;
struct prev_cputime prev_cputime; struct prev_cputime prev_cputime;
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
seqcount_t vtime_seqcount; struct vtime vtime;
unsigned long long vtime_starttime;
enum {
/* Task is sleeping or running in a CPU with VTIME inactive: */
VTIME_INACTIVE = 0,
/* Task runs in userspace in a CPU with VTIME active: */
VTIME_USER,
/* Task runs in kernelspace in a CPU with VTIME active: */
VTIME_SYS,
} vtime_state;
#endif #endif
#ifdef CONFIG_NO_HZ_FULL #ifdef CONFIG_NO_HZ_FULL
......
...@@ -1637,9 +1637,9 @@ static __latent_entropy struct task_struct *copy_process( ...@@ -1637,9 +1637,9 @@ static __latent_entropy struct task_struct *copy_process(
prev_cputime_init(&p->prev_cputime); prev_cputime_init(&p->prev_cputime);
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
seqcount_init(&p->vtime_seqcount); seqcount_init(&p->vtime.seqcount);
p->vtime_starttime = 0; p->vtime.starttime = 0;
p->vtime_state = VTIME_INACTIVE; p->vtime.state = VTIME_INACTIVE;
#endif #endif
#if defined(SPLIT_RSS_COUNTING) #if defined(SPLIT_RSS_COUNTING)
......
...@@ -679,17 +679,17 @@ void thread_group_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st) ...@@ -679,17 +679,17 @@ void thread_group_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
#endif /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */ #endif /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
static u64 vtime_delta(struct task_struct *tsk) static u64 vtime_delta(struct vtime *vtime)
{ {
unsigned long now = READ_ONCE(jiffies); unsigned long now = READ_ONCE(jiffies);
if (time_before(now, (unsigned long)tsk->vtime_starttime)) if (time_before(now, (unsigned long)vtime->starttime))
return 0; return 0;
return jiffies_to_nsecs(now - tsk->vtime_starttime); return jiffies_to_nsecs(now - vtime->starttime);
} }
static u64 get_vtime_delta(struct task_struct *tsk) static u64 get_vtime_delta(struct vtime *vtime)
{ {
unsigned long now = READ_ONCE(jiffies); unsigned long now = READ_ONCE(jiffies);
u64 delta, other; u64 delta, other;
...@@ -701,49 +701,56 @@ static u64 get_vtime_delta(struct task_struct *tsk) ...@@ -701,49 +701,56 @@ static u64 get_vtime_delta(struct task_struct *tsk)
* elapsed time. Limit account_other_time to prevent rounding * elapsed time. Limit account_other_time to prevent rounding
* errors from causing elapsed vtime to go negative. * errors from causing elapsed vtime to go negative.
*/ */
delta = jiffies_to_nsecs(now - tsk->vtime_starttime); delta = jiffies_to_nsecs(now - vtime->starttime);
other = account_other_time(delta); other = account_other_time(delta);
WARN_ON_ONCE(tsk->vtime_state == VTIME_INACTIVE); WARN_ON_ONCE(vtime->state == VTIME_INACTIVE);
tsk->vtime_starttime = now; vtime->starttime = now;
return delta - other; return delta - other;
} }
static void __vtime_account_system(struct task_struct *tsk) static void __vtime_account_system(struct task_struct *tsk)
{ {
account_system_time(tsk, irq_count(), get_vtime_delta(tsk)); account_system_time(tsk, irq_count(), get_vtime_delta(&tsk->vtime));
} }
void vtime_account_system(struct task_struct *tsk) void vtime_account_system(struct task_struct *tsk)
{ {
if (!vtime_delta(tsk)) struct vtime *vtime = &tsk->vtime;
if (!vtime_delta(vtime))
return; return;
write_seqcount_begin(&tsk->vtime_seqcount); write_seqcount_begin(&vtime->seqcount);
__vtime_account_system(tsk); __vtime_account_system(tsk);
write_seqcount_end(&tsk->vtime_seqcount); write_seqcount_end(&vtime->seqcount);
} }
void vtime_user_enter(struct task_struct *tsk) void vtime_user_enter(struct task_struct *tsk)
{ {
write_seqcount_begin(&tsk->vtime_seqcount); struct vtime *vtime = &tsk->vtime;
if (vtime_delta(tsk))
write_seqcount_begin(&vtime->seqcount);
if (vtime_delta(vtime))
__vtime_account_system(tsk); __vtime_account_system(tsk);
tsk->vtime_snap_whence = VTIME_USER; vtime->state = VTIME_USER;
write_seqcount_end(&tsk->vtime_seqcount); write_seqcount_end(&vtime->seqcount);
} }
void vtime_user_exit(struct task_struct *tsk) void vtime_user_exit(struct task_struct *tsk)
{ {
write_seqcount_begin(&tsk->vtime_seqcount); struct vtime *vtime = &tsk->vtime;
if (vtime_delta(tsk))
account_user_time(tsk, get_vtime_delta(tsk)); write_seqcount_begin(&vtime->seqcount);
tsk->vtime_snap_whence = VTIME_SYS; if (vtime_delta(vtime))
write_seqcount_end(&tsk->vtime_seqcount); account_user_time(tsk, get_vtime_delta(vtime));
vtime->state = VTIME_SYS;
write_seqcount_end(&vtime->seqcount);
} }
void vtime_guest_enter(struct task_struct *tsk) void vtime_guest_enter(struct task_struct *tsk)
{ {
struct vtime *vtime = &tsk->vtime;
/* /*
* The flags must be updated under the lock with * The flags must be updated under the lock with
* the vtime_starttime flush and update. * the vtime_starttime flush and update.
...@@ -751,54 +758,62 @@ void vtime_guest_enter(struct task_struct *tsk) ...@@ -751,54 +758,62 @@ void vtime_guest_enter(struct task_struct *tsk)
* synchronization against the reader (task_gtime()) * synchronization against the reader (task_gtime())
* that can thus safely catch up with a tickless delta. * that can thus safely catch up with a tickless delta.
*/ */
write_seqcount_begin(&tsk->vtime_seqcount); write_seqcount_begin(&vtime->seqcount);
if (vtime_delta(tsk)) if (vtime_delta(vtime))
__vtime_account_system(tsk); __vtime_account_system(tsk);
current->flags |= PF_VCPU; current->flags |= PF_VCPU;
write_seqcount_end(&tsk->vtime_seqcount); write_seqcount_end(&vtime->seqcount);
} }
EXPORT_SYMBOL_GPL(vtime_guest_enter); EXPORT_SYMBOL_GPL(vtime_guest_enter);
void vtime_guest_exit(struct task_struct *tsk) void vtime_guest_exit(struct task_struct *tsk)
{ {
write_seqcount_begin(&tsk->vtime_seqcount); struct vtime *vtime = &tsk->vtime;
write_seqcount_begin(&vtime->seqcount);
__vtime_account_system(tsk); __vtime_account_system(tsk);
current->flags &= ~PF_VCPU; current->flags &= ~PF_VCPU;
write_seqcount_end(&tsk->vtime_seqcount); write_seqcount_end(&vtime->seqcount);
} }
EXPORT_SYMBOL_GPL(vtime_guest_exit); EXPORT_SYMBOL_GPL(vtime_guest_exit);
void vtime_account_idle(struct task_struct *tsk) void vtime_account_idle(struct task_struct *tsk)
{ {
account_idle_time(get_vtime_delta(tsk)); account_idle_time(get_vtime_delta(&tsk->vtime));
} }
void arch_vtime_task_switch(struct task_struct *prev) void arch_vtime_task_switch(struct task_struct *prev)
{ {
write_seqcount_begin(&prev->vtime_seqcount); struct vtime *vtime = &prev->vtime;
prev->vtime_state = VTIME_INACTIVE;
write_seqcount_end(&prev->vtime_seqcount);
write_seqcount_begin(&current->vtime_seqcount); write_seqcount_begin(&vtime->seqcount);
current->vtime_state = VTIME_SYS; vtime->state = VTIME_INACTIVE;
current->vtime_starttime = jiffies; write_seqcount_end(&vtime->seqcount);
write_seqcount_end(&current->vtime_seqcount);
vtime = &current->vtime;
write_seqcount_begin(&vtime->seqcount);
vtime->state = VTIME_SYS;
vtime->starttime = jiffies;
write_seqcount_end(&vtime->seqcount);
} }
void vtime_init_idle(struct task_struct *t, int cpu) void vtime_init_idle(struct task_struct *t, int cpu)
{ {
struct vtime *vtime = &t->vtime;
unsigned long flags; unsigned long flags;
local_irq_save(flags); local_irq_save(flags);
write_seqcount_begin(&t->vtime_seqcount); write_seqcount_begin(&vtime->seqcount);
t->vtime_state = VTIME_SYS; vtime->state = VTIME_SYS;
t->vtime_starttime = jiffies; vtime->starttime = jiffies;
write_seqcount_end(&t->vtime_seqcount); write_seqcount_end(&vtime->seqcount);
local_irq_restore(flags); local_irq_restore(flags);
} }
u64 task_gtime(struct task_struct *t) u64 task_gtime(struct task_struct *t)
{ {
struct vtime *vtime = &t->vtime;
unsigned int seq; unsigned int seq;
u64 gtime; u64 gtime;
...@@ -806,13 +821,13 @@ u64 task_gtime(struct task_struct *t) ...@@ -806,13 +821,13 @@ u64 task_gtime(struct task_struct *t)
return t->gtime; return t->gtime;
do { do {
seq = read_seqcount_begin(&t->vtime_seqcount); seq = read_seqcount_begin(&vtime->seqcount);
gtime = t->gtime; gtime = t->gtime;
if (t->vtime_state == VTIME_SYS && t->flags & PF_VCPU) if (vtime->state == VTIME_SYS && t->flags & PF_VCPU)
gtime += vtime_delta(t); gtime += vtime_delta(vtime);
} while (read_seqcount_retry(&t->vtime_seqcount, seq)); } while (read_seqcount_retry(&vtime->seqcount, seq));
return gtime; return gtime;
} }
...@@ -824,8 +839,9 @@ u64 task_gtime(struct task_struct *t) ...@@ -824,8 +839,9 @@ u64 task_gtime(struct task_struct *t)
*/ */
void task_cputime(struct task_struct *t, u64 *utime, u64 *stime) void task_cputime(struct task_struct *t, u64 *utime, u64 *stime)
{ {
u64 delta; struct vtime *vtime = &t->vtime;
unsigned int seq; unsigned int seq;
u64 delta;
if (!vtime_accounting_enabled()) { if (!vtime_accounting_enabled()) {
*utime = t->utime; *utime = t->utime;
...@@ -834,25 +850,25 @@ void task_cputime(struct task_struct *t, u64 *utime, u64 *stime) ...@@ -834,25 +850,25 @@ void task_cputime(struct task_struct *t, u64 *utime, u64 *stime)
} }
do { do {
seq = read_seqcount_begin(&t->vtime_seqcount); seq = read_seqcount_begin(&vtime->seqcount);
*utime = t->utime; *utime = t->utime;
*stime = t->stime; *stime = t->stime;
/* Task is sleeping, nothing to add */ /* Task is sleeping, nothing to add */
if (t->vtime_state == VTIME_INACTIVE || is_idle_task(t)) if (vtime->state == VTIME_INACTIVE || is_idle_task(t))
continue; continue;
delta = vtime_delta(t); delta = vtime_delta(vtime);
/* /*
* Task runs either in user or kernel space, add pending nohz time to * Task runs either in user or kernel space, add pending nohz time to
* the right place. * the right place.
*/ */
if (t->vtime_state == VTIME_USER || t->flags & PF_VCPU) if (vtime->state == VTIME_USER || t->flags & PF_VCPU)
*utime += delta; *utime += delta;
else if (t->vtime_state == VTIME_SYS) else if (vtime->state == VTIME_SYS)
*stime += delta; *stime += delta;
} while (read_seqcount_retry(&t->vtime_seqcount, seq)); } while (read_seqcount_retry(&vtime->seqcount, seq));
} }
#endif /* CONFIG_VIRT_CPU_ACCOUNTING_GEN */ #endif /* CONFIG_VIRT_CPU_ACCOUNTING_GEN */
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment