Commit 7dd58230 authored by Frederic Weisbecker's avatar Frederic Weisbecker Committed by Ingo Molnar

sched/cputime, ia64: Accumulate cputime and account only on tick/task switch

Currently CONFIG_VIRT_CPU_ACCOUNTING_NATIVE=y accounts the cputime on
any context boundary: irq entry/exit, guest entry/exit, context switch,
etc...

Calling functions such as account_system_time(), account_user_time()
and such can be costly, especially if they are called on many fastpaths,
such as twice per IRQ. Those functions do more than just accounting to
kcpustat and task cputime. Depending on the config, some subsystems can
perform unpleasant multiplications and divisions, among other things.

So lets accumulate the cputime instead and delay the accounting on ticks
and context switches only.
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Christian Borntraeger <borntraeger@de.ibm.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Stanislaw Gruszka <sgruszka@redhat.com>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Wanpeng Li <wanpeng.li@hotmail.com>
Link: http://lkml.kernel.org/r/1483636310-6557-9-git-send-email-fweisbec@gmail.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent a19ff1a2
...@@ -27,6 +27,12 @@ struct thread_info { ...@@ -27,6 +27,12 @@ struct thread_info {
mm_segment_t addr_limit; /* user-level address space limit */ mm_segment_t addr_limit; /* user-level address space limit */
int preempt_count; /* 0=premptable, <0=BUG; will also serve as bh-counter */ int preempt_count; /* 0=premptable, <0=BUG; will also serve as bh-counter */
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
__u64 utime;
__u64 stime;
__u64 gtime;
__u64 hardirq_time;
__u64 softirq_time;
__u64 idle_time;
__u64 ac_stamp; __u64 ac_stamp;
__u64 ac_leave; __u64 ac_leave;
__u64 ac_stime; __u64 ac_stime;
......
...@@ -63,14 +63,39 @@ extern cputime_t cycle_to_cputime(u64 cyc); ...@@ -63,14 +63,39 @@ extern cputime_t cycle_to_cputime(u64 cyc);
void vtime_account_user(struct task_struct *tsk) void vtime_account_user(struct task_struct *tsk)
{ {
cputime_t delta_utime;
struct thread_info *ti = task_thread_info(tsk); struct thread_info *ti = task_thread_info(tsk);
cputime_t delta;
if (ti->ac_utime) { if (ti->utime)
delta_utime = cycle_to_cputime(ti->ac_utime); account_user_time(tsk, cycle_to_cputime(ti->utime));
account_user_time(tsk, delta_utime);
ti->ac_utime = 0; if (ti->gtime)
account_guest_time(tsk, cycle_to_cputime(ti->gtime));
if (ti->idle_time)
account_idle_time(cycle_to_cputime(ti->idle_time));
if (ti->stime) {
delta = cycle_to_cputime(ti->stime);
account_system_index_time(tsk, delta, CPUTIME_SYSTEM);
}
if (ti->hardirq_time) {
delta = cycle_to_cputime(ti->hardirq_time);
account_system_index_time(tsk, delta, CPUTIME_IRQ);
}
if (ti->softirq_time) {
delta = cycle_to_cputime(ti->softirq_time);
account_system_index_time(tsk, delta, CPUTIME_SOFTIRQ);
} }
ti->utime = 0;
ti->gtime = 0;
ti->idle_time = 0;
ti->stime = 0;
ti->hardirq_time = 0;
ti->softirq_time = 0;
} }
/* /*
...@@ -91,18 +116,15 @@ void arch_vtime_task_switch(struct task_struct *prev) ...@@ -91,18 +116,15 @@ void arch_vtime_task_switch(struct task_struct *prev)
* Account time for a transition between system, hard irq or soft irq state. * Account time for a transition between system, hard irq or soft irq state.
* Note that this function is called with interrupts enabled. * Note that this function is called with interrupts enabled.
*/ */
static cputime_t vtime_delta(struct task_struct *tsk) static __u64 vtime_delta(struct task_struct *tsk)
{ {
struct thread_info *ti = task_thread_info(tsk); struct thread_info *ti = task_thread_info(tsk);
cputime_t delta_stime; __u64 now, delta_stime;
__u64 now;
WARN_ON_ONCE(!irqs_disabled()); WARN_ON_ONCE(!irqs_disabled());
now = ia64_get_itc(); now = ia64_get_itc();
delta_stime = now - ti->ac_stamp;
delta_stime = cycle_to_cputime(ti->ac_stime + (now - ti->ac_stamp));
ti->ac_stime = 0;
ti->ac_stamp = now; ti->ac_stamp = now;
return delta_stime; return delta_stime;
...@@ -110,15 +132,25 @@ static cputime_t vtime_delta(struct task_struct *tsk) ...@@ -110,15 +132,25 @@ static cputime_t vtime_delta(struct task_struct *tsk)
void vtime_account_system(struct task_struct *tsk) void vtime_account_system(struct task_struct *tsk)
{ {
cputime_t delta = vtime_delta(tsk); struct thread_info *ti = task_thread_info(tsk);
__u64 stime = vtime_delta(tsk);
account_system_time(tsk, 0, delta);
if ((tsk->flags & PF_VCPU) && !irq_count())
ti->gtime += stime;
else if (hardirq_count())
ti->hardirq_time += stime;
else if (in_serving_softirq())
ti->softirq_time += stime;
else
ti->stime += stime;
} }
EXPORT_SYMBOL_GPL(vtime_account_system); EXPORT_SYMBOL_GPL(vtime_account_system);
void vtime_account_idle(struct task_struct *tsk) void vtime_account_idle(struct task_struct *tsk)
{ {
account_idle_time(vtime_delta(tsk)); struct thread_info *ti = task_thread_info(tsk);
ti->idle_time += vtime_delta(tsk);
} }
#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */ #endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment