Commit 24ab7f5a authored by Thomas Gleixner

posix-cpu-timers: Consolidate thread group sample code

cpu_clock_sample_group() and cpu_timer_sample_group() are almost the
same. Before the rename one called thread_group_cputimer() and the other
thread_group_cputime(). Really intuitive function names.

Consolidate the functions and also avoid the thread traversal when
the thread group's accounting is already active.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Frederic Weisbecker <frederic@kernel.org>
Link: https://lkml.kernel.org/r/20190821192919.960966884@linutronix.de
parent c506bef4
kernel/time/posix-cpu-timers.c

@@ -294,29 +294,37 @@ thread_group_start_cputime(struct task_struct *tsk, struct task_cputime *times)
 }
 
 /*
- * Sample a process (thread group) clock for the given group_leader task.
- * Must be called with task sighand lock held for safe while_each_thread()
- * traversal.
+ * Sample a process (thread group) clock for the given task clkid. If the
+ * group's cputime accounting is already enabled, read the atomic
+ * store. Otherwise a full update is required. Task's sighand lock must be
+ * held to protect the task traversal on a full update.
  */
 static int cpu_clock_sample_group(const clockid_t which_clock,
 				  struct task_struct *p,
-				  u64 *sample)
+				  u64 *sample, bool start)
 {
+	struct thread_group_cputimer *cputimer = &p->signal->cputimer;
 	struct task_cputime cputime;
 
+	if (!READ_ONCE(cputimer->running)) {
+		if (start)
+			thread_group_start_cputime(p, &cputime);
+		else
+			thread_group_cputime(p, &cputime);
+	} else {
+		sample_cputime_atomic(&cputime, &cputimer->cputime_atomic);
+	}
+
 	switch (CPUCLOCK_WHICH(which_clock)) {
 	default:
 		return -EINVAL;
 	case CPUCLOCK_PROF:
-		thread_group_cputime(p, &cputime);
 		*sample = cputime.utime + cputime.stime;
 		break;
 	case CPUCLOCK_VIRT:
-		thread_group_cputime(p, &cputime);
 		*sample = cputime.utime;
 		break;
 	case CPUCLOCK_SCHED:
-		thread_group_cputime(p, &cputime);
 		*sample = cputime.sum_exec_runtime;
 		break;
 	}
@@ -336,7 +344,7 @@ static int posix_cpu_clock_get(const clockid_t clock, struct timespec64 *tp)
 	if (CPUCLOCK_PERTHREAD(clock))
 		cpu_clock_sample(clkid, tsk, &t);
 	else
-		cpu_clock_sample_group(clkid, tsk, &t);
+		cpu_clock_sample_group(clkid, tsk, &t, false);
 	put_task_struct(tsk);
 
 	*tp = ns_to_timespec64(t);
@@ -539,33 +547,6 @@ static void cpu_timer_fire(struct k_itimer *timer)
 	}
 }
 
-/*
- * Sample a process (thread group) timer for the given group_leader task.
- * Must be called with task sighand lock held for safe while_each_thread()
- * traversal.
- */
-static int cpu_timer_sample_group(const clockid_t which_clock,
-				  struct task_struct *p, u64 *sample)
-{
-	struct task_cputime cputime;
-
-	thread_group_start_cputime(p, &cputime);
-
-	switch (CPUCLOCK_WHICH(which_clock)) {
-	default:
-		return -EINVAL;
-	case CPUCLOCK_PROF:
-		*sample = cputime.utime + cputime.stime;
-		break;
-	case CPUCLOCK_VIRT:
-		*sample = cputime.utime;
-		break;
-	case CPUCLOCK_SCHED:
-		*sample = cputime.sum_exec_runtime;
-		break;
-	}
-	return 0;
-}
-
 /*
  * Guts of sys_timer_settime for CPU timers.
  * This is called with the timer locked and interrupts disabled.
@@ -627,7 +608,7 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
 	if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
 		cpu_clock_sample(timer->it_clock, p, &val);
 	} else {
-		cpu_timer_sample_group(timer->it_clock, p, &val);
+		cpu_clock_sample_group(timer->it_clock, p, &val, true);
 	}
 
 	if (old) {
@@ -755,7 +736,7 @@ static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec64 *itp
 			timer->it.cpu.expires = 0;
 			return;
 		} else {
-			cpu_timer_sample_group(timer->it_clock, p, &now);
+			cpu_clock_sample_group(timer->it_clock, p, &now, false);
 			unlock_task_sighand(p, &flags);
 		}
 	}
@@ -1042,7 +1023,7 @@ static void posix_cpu_timer_rearm(struct k_itimer *timer)
 			/* If the process is dying, no need to rearm */
 			goto unlock;
 		}
-		cpu_timer_sample_group(timer->it_clock, p, &now);
+		cpu_clock_sample_group(timer->it_clock, p, &now, true);
 		bump_cpu_timer(timer, now);
 		/* Leave the sighand locked for the call below. */
 	}
@@ -1211,7 +1192,7 @@ void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
 	if (WARN_ON_ONCE(clock_idx >= CPUCLOCK_SCHED))
 		return;
 
-	ret = cpu_timer_sample_group(clock_idx, tsk, &now);
+	ret = cpu_clock_sample_group(clock_idx, tsk, &now, true);
 	if (oldval && ret != -EINVAL) {
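Editor's note: as a reading aid only, below is a minimal, standalone C sketch of the sampling decision the consolidated cpu_clock_sample_group() makes after this change: read the cached value when group-wide accounting is already running, otherwise do the expensive full traversal, and only enable accounting when the caller is arming a timer (start == true). The struct, helper and values are illustrative stand-ins, not the kernel's types or APIs.

/* Simplified stand-in for the consolidated sampler's control flow. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct sample_state {
	bool running;          /* group-wide accounting already active? */
	uint64_t cached_total; /* value maintained by the atomic store */
};

/* Stand-in for the expensive per-thread traversal. */
static uint64_t full_thread_traversal(void)
{
	return 100;
}

static uint64_t sample_group(struct sample_state *st, bool start)
{
	if (st->running) {
		/* Fast path: accounting is on, read the cached value. */
		return st->cached_total;
	}
	if (start) {
		/* Caller is arming a timer: enable accounting, then sample. */
		st->running = true;
		st->cached_total = full_thread_traversal();
		return st->cached_total;
	}
	/* One-off read (e.g. a clock_gettime() path): sample without enabling. */
	return full_thread_traversal();
}

int main(void)
{
	struct sample_state st = { .running = false, .cached_total = 0 };

	printf("%llu\n", (unsigned long long)sample_group(&st, false)); /* slow path */
	printf("%llu\n", (unsigned long long)sample_group(&st, true));  /* enables accounting */
	printf("%llu\n", (unsigned long long)sample_group(&st, false)); /* fast path now */
	return 0;
}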