Commit f71bb0ac authored by Thomas Gleixner

Merge branch 'timers/posixtimers' into timers/tracing

Merge reason: timer tracepoint patches depend on both branches
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parents 7285dd7f a42548a1
@@ -30,6 +30,7 @@ typedef u64 cputime_t;
 typedef u64 cputime64_t;
 
 #define cputime_zero			((cputime_t)0)
+#define cputime_one_jiffy		jiffies_to_cputime(1)
 #define cputime_max			((~((cputime_t)0) >> 1) - 1)
 #define cputime_add(__a, __b)		((__a) + (__b))
 #define cputime_sub(__a, __b)		((__a) - (__b))
...
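The point of the cputime_one_jiffy change is already visible in this first hunk: jiffies_to_cputime(1) is evaluated on every tick in hot paths such as account_process_tick(), so the per-jiffy cputime value gets a single name. On jiffies-based architectures the conversion folds to a compile-time constant and a #define (as above) is enough; with CONFIG_VIRT_CPU_ACCOUNTING on powerpc the factor is only known after timebase calibration, so the hunks below compute it once at boot instead. A minimal userspace sketch of that boot-time variant; the function names mirror the kernel, but tb_per_jiffy and all numbers are invented for illustration:

/* Userspace model of the precomputed cputime_one_jiffy; numbers illustrative. */
#include <stdio.h>

typedef unsigned long long cputime_t;

static cputime_t tb_per_jiffy;		/* runtime-calibrated on powerpc */
static cputime_t cputime_one_jiffy;	/* the patch's cached per-jiffy value */

static cputime_t jiffies_to_cputime(unsigned long jif)
{
	return (cputime_t)jif * tb_per_jiffy;	/* stands in for the real scaling math */
}

/* Mirrors powerpc's setup_cputime_one_jiffy(): run once after calibration. */
static void setup_cputime_one_jiffy(void)
{
	cputime_one_jiffy = jiffies_to_cputime(1);
}

int main(void)
{
	tb_per_jiffy = 512;		/* would come from timebase calibration */
	setup_cputime_one_jiffy();

	/* The per-tick path now loads a variable instead of converting. */
	cputime_t utime = 0;
	for (int tick = 0; tick < 100; tick++)
		utime += cputime_one_jiffy;

	printf("accounted %llu cputime units over 100 ticks\n", utime);
	return 0;
}

The loop stands in for 100 timer ticks; each tick costs one load and one add rather than a conversion.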
@@ -18,6 +18,9 @@
 
 #ifndef CONFIG_VIRT_CPU_ACCOUNTING
 #include <asm-generic/cputime.h>
+#ifdef __KERNEL__
+static inline void setup_cputime_one_jiffy(void) { }
+#endif
 #else
 
 #include <linux/types.h>
@@ -48,6 +51,11 @@ typedef u64 cputime64_t;
 
 #ifdef __KERNEL__
 
+/*
+ * One jiffy in timebase units computed during initialization
+ */
+extern cputime_t cputime_one_jiffy;
+
 /*
  * Convert cputime <-> jiffies
  */
@@ -89,6 +97,11 @@ static inline cputime_t jiffies_to_cputime(const unsigned long jif)
 	return ct;
 }
 
+static inline void setup_cputime_one_jiffy(void)
+{
+	cputime_one_jiffy = jiffies_to_cputime(1);
+}
+
 static inline cputime64_t jiffies64_to_cputime64(const u64 jif)
 {
 	cputime_t ct;
...
@@ -193,6 +193,8 @@ EXPORT_SYMBOL(__cputime_clockt_factor);
 DEFINE_PER_CPU(unsigned long, cputime_last_delta);
 DEFINE_PER_CPU(unsigned long, cputime_scaled_last_delta);
 
+cputime_t cputime_one_jiffy;
+
 static void calc_cputime_factors(void)
 {
 	struct div_result res;
@@ -500,6 +502,7 @@ static int __init iSeries_tb_recal(void)
 				tb_to_xs = divres.result_low;
 				vdso_data->tb_ticks_per_sec = tb_ticks_per_sec;
 				vdso_data->tb_to_xs = tb_to_xs;
+				setup_cputime_one_jiffy();
 			}
 			else {
 				printk( "Titan recalibrate: FAILED (difference > 4 percent)\n"
@@ -950,6 +953,7 @@ void __init time_init(void)
 	tb_ticks_per_usec = ppc_tb_freq / 1000000;
 	tb_to_us = mulhwu_scale_factor(ppc_tb_freq, 1000000);
 	calc_cputime_factors();
+	setup_cputime_one_jiffy();
 
 	/*
 	 * Calculate the length of each tick in ns.  It will not be
...
@@ -42,6 +42,7 @@ __div(unsigned long long n, unsigned int base)
 #endif /* __s390x__ */
 
 #define cputime_zero			(0ULL)
+#define cputime_one_jiffy		jiffies_to_cputime(1)
 #define cputime_max			((~0UL >> 1) - 1)
 #define cputime_add(__a, __b)		((__a) + (__b))
 #define cputime_sub(__a, __b)		((__a) - (__b))
...
@@ -7,6 +7,7 @@
 typedef unsigned long cputime_t;
 
 #define cputime_zero			(0UL)
+#define cputime_one_jiffy		jiffies_to_cputime(1)
 #define cputime_max			((~0UL >> 1) - 1)
 #define cputime_add(__a, __b)		((__a) + (__b))
 #define cputime_sub(__a, __b)		((__a) - (__b))
...
@@ -470,6 +470,13 @@ struct pacct_struct {
 	unsigned long		ac_minflt, ac_majflt;
 };
 
+struct cpu_itimer {
+	cputime_t expires;
+	cputime_t incr;
+	u32 error;
+	u32 incr_error;
+};
+
 /**
  * struct task_cputime - collected CPU time counts
  * @utime:		time spent in user mode, in &cputime_t units
@@ -564,9 +571,12 @@ struct signal_struct {
 	struct pid *leader_pid;
 	ktime_t it_real_incr;
 
-	/* ITIMER_PROF and ITIMER_VIRTUAL timers for the process */
-	cputime_t it_prof_expires, it_virt_expires;
-	cputime_t it_prof_incr, it_virt_incr;
+	/*
+	 * ITIMER_PROF and ITIMER_VIRTUAL timers for the process, we use
+	 * CPUCLOCK_PROF and CPUCLOCK_VIRT for indexing array as these
+	 * values are defined to 0 and 1 respectively
+	 */
+	struct cpu_itimer it[2];
 
 	/*
 	 * Thread group totals for process CPU timers.
...
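The replacement comment in signal_struct is load-bearing: the new it[] array is indexed directly with the POSIX CPU clock constants from include/linux/posix-timers.h, which are defined as

#define CPUCLOCK_PROF		0
#define CPUCLOCK_VIRT		1
#define CPUCLOCK_SCHED		2

so sig->it[CPUCLOCK_PROF] and sig->it[CPUCLOCK_VIRT] are exactly the two itimers being merged, and CPUCLOCK_SCHED deliberately has no slot.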
@@ -62,6 +62,7 @@
 #include <linux/fs_struct.h>
 #include <linux/magic.h>
 #include <linux/perf_counter.h>
+#include <linux/posix-timers.h>
 
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
@@ -790,10 +791,10 @@ static void posix_cpu_timers_init_group(struct signal_struct *sig)
 	thread_group_cputime_init(sig);
 
 	/* Expiration times and increments. */
-	sig->it_virt_expires = cputime_zero;
-	sig->it_virt_incr = cputime_zero;
-	sig->it_prof_expires = cputime_zero;
-	sig->it_prof_incr = cputime_zero;
+	sig->it[CPUCLOCK_PROF].expires = cputime_zero;
+	sig->it[CPUCLOCK_PROF].incr = cputime_zero;
+	sig->it[CPUCLOCK_VIRT].expires = cputime_zero;
+	sig->it[CPUCLOCK_VIRT].incr = cputime_zero;
 
 	/* Cached expiration times. */
 	sig->cputime_expires.prof_exp = cputime_zero;
...
@@ -41,10 +41,43 @@ static struct timeval itimer_get_remtime(struct hrtimer *timer)
 	return ktime_to_timeval(rem);
 }
 
+static void get_cpu_itimer(struct task_struct *tsk, unsigned int clock_id,
+			   struct itimerval *const value)
+{
+	cputime_t cval, cinterval;
+	struct cpu_itimer *it = &tsk->signal->it[clock_id];
+
+	spin_lock_irq(&tsk->sighand->siglock);
+
+	cval = it->expires;
+	cinterval = it->incr;
+	if (!cputime_eq(cval, cputime_zero)) {
+		struct task_cputime cputime;
+		cputime_t t;
+
+		thread_group_cputimer(tsk, &cputime);
+		if (clock_id == CPUCLOCK_PROF)
+			t = cputime_add(cputime.utime, cputime.stime);
+		else
+			/* CPUCLOCK_VIRT */
+			t = cputime.utime;
+
+		if (cputime_le(cval, t))
+			/* about to fire */
+			cval = cputime_one_jiffy;
+		else
+			cval = cputime_sub(cval, t);
+	}
+
+	spin_unlock_irq(&tsk->sighand->siglock);
+
+	cputime_to_timeval(cval, &value->it_value);
+	cputime_to_timeval(cinterval, &value->it_interval);
+}
+
 int do_getitimer(int which, struct itimerval *value)
 {
 	struct task_struct *tsk = current;
-	cputime_t cinterval, cval;
 
 	switch (which) {
 	case ITIMER_REAL:
@@ -55,44 +88,10 @@ int do_getitimer(int which, struct itimerval *value)
 		spin_unlock_irq(&tsk->sighand->siglock);
 		break;
 	case ITIMER_VIRTUAL:
-		spin_lock_irq(&tsk->sighand->siglock);
-		cval = tsk->signal->it_virt_expires;
-		cinterval = tsk->signal->it_virt_incr;
-		if (!cputime_eq(cval, cputime_zero)) {
-			struct task_cputime cputime;
-			cputime_t utime;
-
-			thread_group_cputimer(tsk, &cputime);
-			utime = cputime.utime;
-			if (cputime_le(cval, utime)) { /* about to fire */
-				cval = jiffies_to_cputime(1);
-			} else {
-				cval = cputime_sub(cval, utime);
-			}
-		}
-		spin_unlock_irq(&tsk->sighand->siglock);
-		cputime_to_timeval(cval, &value->it_value);
-		cputime_to_timeval(cinterval, &value->it_interval);
+		get_cpu_itimer(tsk, CPUCLOCK_VIRT, value);
 		break;
 	case ITIMER_PROF:
-		spin_lock_irq(&tsk->sighand->siglock);
-		cval = tsk->signal->it_prof_expires;
-		cinterval = tsk->signal->it_prof_incr;
-		if (!cputime_eq(cval, cputime_zero)) {
-			struct task_cputime times;
-			cputime_t ptime;
-
-			thread_group_cputimer(tsk, &times);
-			ptime = cputime_add(times.utime, times.stime);
-			if (cputime_le(cval, ptime)) { /* about to fire */
-				cval = jiffies_to_cputime(1);
-			} else {
-				cval = cputime_sub(cval, ptime);
-			}
-		}
-		spin_unlock_irq(&tsk->sighand->siglock);
-		cputime_to_timeval(cval, &value->it_value);
-		cputime_to_timeval(cinterval, &value->it_interval);
+		get_cpu_itimer(tsk, CPUCLOCK_PROF, value);
 		break;
 	default:
 		return(-EINVAL);
@@ -128,6 +127,54 @@ enum hrtimer_restart it_real_fn(struct hrtimer *timer)
 	return HRTIMER_NORESTART;
 }
 
+static inline u32 cputime_sub_ns(cputime_t ct, s64 real_ns)
+{
+	struct timespec ts;
+	s64 cpu_ns;
+
+	cputime_to_timespec(ct, &ts);
+	cpu_ns = timespec_to_ns(&ts);
+
+	return (cpu_ns <= real_ns) ? 0 : cpu_ns - real_ns;
+}
+
+static void set_cpu_itimer(struct task_struct *tsk, unsigned int clock_id,
+			   const struct itimerval *const value,
+			   struct itimerval *const ovalue)
+{
+	cputime_t cval, nval, cinterval, ninterval;
+	s64 ns_ninterval, ns_nval;
+	struct cpu_itimer *it = &tsk->signal->it[clock_id];
+
+	nval = timeval_to_cputime(&value->it_value);
+	ns_nval = timeval_to_ns(&value->it_value);
+	ninterval = timeval_to_cputime(&value->it_interval);
+	ns_ninterval = timeval_to_ns(&value->it_interval);
+
+	it->incr_error = cputime_sub_ns(ninterval, ns_ninterval);
+	it->error = cputime_sub_ns(nval, ns_nval);
+
+	spin_lock_irq(&tsk->sighand->siglock);
+
+	cval = it->expires;
+	cinterval = it->incr;
+	if (!cputime_eq(cval, cputime_zero) ||
+	    !cputime_eq(nval, cputime_zero)) {
+		if (cputime_gt(nval, cputime_zero))
+			nval = cputime_add(nval, cputime_one_jiffy);
+		set_process_cpu_timer(tsk, clock_id, &nval, &cval);
+	}
+	it->expires = nval;
+	it->incr = ninterval;
+
+	spin_unlock_irq(&tsk->sighand->siglock);
+
+	if (ovalue) {
+		cputime_to_timeval(cval, &ovalue->it_value);
+		cputime_to_timeval(cinterval, &ovalue->it_interval);
+	}
+}
+
 /*
  * Returns true if the timeval is in canonical form
  */
@@ -139,7 +186,6 @@ int do_setitimer(int which, struct itimerval *value, struct itimerval *ovalue)
 	struct task_struct *tsk = current;
 	struct hrtimer *timer;
 	ktime_t expires;
-	cputime_t cval, cinterval, nval, ninterval;
 
 	/*
 	 * Validate the timevals in value.
@@ -174,48 +220,10 @@ int do_setitimer(int which, struct itimerval *value, struct itimerval *ovalue)
 		spin_unlock_irq(&tsk->sighand->siglock);
 		break;
 	case ITIMER_VIRTUAL:
-		nval = timeval_to_cputime(&value->it_value);
-		ninterval = timeval_to_cputime(&value->it_interval);
-		spin_lock_irq(&tsk->sighand->siglock);
-		cval = tsk->signal->it_virt_expires;
-		cinterval = tsk->signal->it_virt_incr;
-		if (!cputime_eq(cval, cputime_zero) ||
-		    !cputime_eq(nval, cputime_zero)) {
-			if (cputime_gt(nval, cputime_zero))
-				nval = cputime_add(nval,
-						   jiffies_to_cputime(1));
-			set_process_cpu_timer(tsk, CPUCLOCK_VIRT,
-					      &nval, &cval);
-		}
-		tsk->signal->it_virt_expires = nval;
-		tsk->signal->it_virt_incr = ninterval;
-		spin_unlock_irq(&tsk->sighand->siglock);
-		if (ovalue) {
-			cputime_to_timeval(cval, &ovalue->it_value);
-			cputime_to_timeval(cinterval, &ovalue->it_interval);
-		}
+		set_cpu_itimer(tsk, CPUCLOCK_VIRT, value, ovalue);
 		break;
 	case ITIMER_PROF:
-		nval = timeval_to_cputime(&value->it_value);
-		ninterval = timeval_to_cputime(&value->it_interval);
-		spin_lock_irq(&tsk->sighand->siglock);
-		cval = tsk->signal->it_prof_expires;
-		cinterval = tsk->signal->it_prof_incr;
-		if (!cputime_eq(cval, cputime_zero) ||
-		    !cputime_eq(nval, cputime_zero)) {
-			if (cputime_gt(nval, cputime_zero))
-				nval = cputime_add(nval,
-						   jiffies_to_cputime(1));
-			set_process_cpu_timer(tsk, CPUCLOCK_PROF,
-					      &nval, &cval);
-		}
-		tsk->signal->it_prof_expires = nval;
-		tsk->signal->it_prof_incr = ninterval;
-		spin_unlock_irq(&tsk->sighand->siglock);
-		if (ovalue) {
-			cputime_to_timeval(cval, &ovalue->it_value);
-			cputime_to_timeval(cinterval, &ovalue->it_interval);
-		}
+		set_cpu_itimer(tsk, CPUCLOCK_PROF, value, ovalue);
 		break;
 	default:
 		return -EINVAL;
...
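The itimer.c rework above folds the duplicated ITIMER_VIRTUAL and ITIMER_PROF arms into get_cpu_itimer()/set_cpu_itimer(), and adds cputime_sub_ns(), which records in nanoseconds how far the cputime representation of the caller's request overshoots the exact value; set_cpu_itimer() stores that in it->error and it->incr_error. A self-contained userspace model of the bookkeeping, assuming a jiffies-based cputime_t and HZ=250 (both invented for illustration):

#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_JIFFY 4000000ULL	/* HZ=250, illustrative */

/* timeval_to_cputime() rounds a request up to whole jiffies. */
static uint64_t ns_to_cputime(uint64_t ns)
{
	return (ns + NSEC_PER_JIFFY - 1) / NSEC_PER_JIFFY;
}

/* Mirrors cputime_sub_ns(): how many ns the rounded value overshoots. */
static uint32_t cputime_sub_ns(uint64_t ct, int64_t real_ns)
{
	int64_t cpu_ns = (int64_t)(ct * NSEC_PER_JIFFY);

	return (cpu_ns <= real_ns) ? 0 : (uint32_t)(cpu_ns - real_ns);
}

int main(void)
{
	int64_t  ns_ninterval = 10000000;		  /* 10 ms requested */
	uint64_t ninterval = ns_to_cputime(ns_ninterval); /* stored as 3 jiffies */
	uint32_t incr_error = cputime_sub_ns(ninterval, ns_ninterval);

	printf("incr = %llu jiffies, incr_error = %u ns\n",
	       (unsigned long long)ninterval, incr_error);
	return 0;
}

Here the 10 ms interval is stored as 3 jiffies (12 ms), so incr_error comes out as 2000000 ns per period; the sketch after the posix-cpu-timers.c hunks shows how that error is burned off.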
@@ -14,11 +14,11 @@
  */
 void update_rlimit_cpu(unsigned long rlim_new)
 {
-	cputime_t cputime;
+	cputime_t cputime = secs_to_cputime(rlim_new);
+	struct signal_struct *const sig = current->signal;
 
-	cputime = secs_to_cputime(rlim_new);
-	if (cputime_eq(current->signal->it_prof_expires, cputime_zero) ||
-	    cputime_gt(current->signal->it_prof_expires, cputime)) {
+	if (cputime_eq(sig->it[CPUCLOCK_PROF].expires, cputime_zero) ||
+	    cputime_gt(sig->it[CPUCLOCK_PROF].expires, cputime)) {
 		spin_lock_irq(&current->sighand->siglock);
 		set_process_cpu_timer(current, CPUCLOCK_PROF, &cputime, NULL);
 		spin_unlock_irq(&current->sighand->siglock);
@@ -542,6 +542,17 @@ static void clear_dead_task(struct k_itimer *timer, union cpu_time_count now)
 			     now);
 }
 
+static inline int expires_gt(cputime_t expires, cputime_t new_exp)
+{
+	return cputime_eq(expires, cputime_zero) ||
+	       cputime_gt(expires, new_exp);
+}
+
+static inline int expires_le(cputime_t expires, cputime_t new_exp)
+{
+	return !cputime_eq(expires, cputime_zero) &&
+	       cputime_le(expires, new_exp);
+}
+
 /*
  * Insert the timer on the appropriate list before any timers that
  * expire later.  This must be called with the tasklist_lock held
@@ -586,34 +597,32 @@ static void arm_timer(struct k_itimer *timer, union cpu_time_count now)
 		 */
 
 		if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
+			union cpu_time_count *exp = &nt->expires;
+
 			switch (CPUCLOCK_WHICH(timer->it_clock)) {
 			default:
 				BUG();
 			case CPUCLOCK_PROF:
-				if (cputime_eq(p->cputime_expires.prof_exp,
-					       cputime_zero) ||
-				    cputime_gt(p->cputime_expires.prof_exp,
-					       nt->expires.cpu))
-					p->cputime_expires.prof_exp =
-						nt->expires.cpu;
+				if (expires_gt(p->cputime_expires.prof_exp,
+					       exp->cpu))
+					p->cputime_expires.prof_exp = exp->cpu;
 				break;
 			case CPUCLOCK_VIRT:
-				if (cputime_eq(p->cputime_expires.virt_exp,
-					       cputime_zero) ||
-				    cputime_gt(p->cputime_expires.virt_exp,
-					       nt->expires.cpu))
-					p->cputime_expires.virt_exp =
-						nt->expires.cpu;
+				if (expires_gt(p->cputime_expires.virt_exp,
+					       exp->cpu))
+					p->cputime_expires.virt_exp = exp->cpu;
 				break;
 			case CPUCLOCK_SCHED:
 				if (p->cputime_expires.sched_exp == 0 ||
-				    p->cputime_expires.sched_exp >
-							nt->expires.sched)
+				    p->cputime_expires.sched_exp > exp->sched)
 					p->cputime_expires.sched_exp =
-						nt->expires.sched;
+						exp->sched;
 				break;
 			}
 		} else {
+			struct signal_struct *const sig = p->signal;
+			union cpu_time_count *exp = &timer->it.cpu.expires;
+
 			/*
 			 * For a process timer, set the cached expiration time.
 			 */
@@ -621,30 +630,23 @@ static void arm_timer(struct k_itimer *timer, union cpu_time_count now)
 			default:
 				BUG();
 			case CPUCLOCK_VIRT:
-				if (!cputime_eq(p->signal->it_virt_expires,
-						cputime_zero) &&
-				    cputime_lt(p->signal->it_virt_expires,
-					       timer->it.cpu.expires.cpu))
+				if (expires_le(sig->it[CPUCLOCK_VIRT].expires,
+					       exp->cpu))
 					break;
-				p->signal->cputime_expires.virt_exp =
-					timer->it.cpu.expires.cpu;
+				sig->cputime_expires.virt_exp = exp->cpu;
 				break;
 			case CPUCLOCK_PROF:
-				if (!cputime_eq(p->signal->it_prof_expires,
-						cputime_zero) &&
-				    cputime_lt(p->signal->it_prof_expires,
-					       timer->it.cpu.expires.cpu))
+				if (expires_le(sig->it[CPUCLOCK_PROF].expires,
+					       exp->cpu))
 					break;
-				i = p->signal->rlim[RLIMIT_CPU].rlim_cur;
+				i = sig->rlim[RLIMIT_CPU].rlim_cur;
 				if (i != RLIM_INFINITY &&
-				    i <= cputime_to_secs(timer->it.cpu.expires.cpu))
+				    i <= cputime_to_secs(exp->cpu))
 					break;
-				p->signal->cputime_expires.prof_exp =
-					timer->it.cpu.expires.cpu;
+				sig->cputime_expires.prof_exp = exp->cpu;
 				break;
 			case CPUCLOCK_SCHED:
-				p->signal->cputime_expires.sched_exp =
-					timer->it.cpu.expires.sched;
+				sig->cputime_expires.sched_exp = exp->sched;
 				break;
 			}
 		}
@@ -1071,6 +1073,36 @@ static void stop_process_timers(struct task_struct *tsk)
 	spin_unlock_irqrestore(&cputimer->lock, flags);
 }
 
+static u32 onecputick;
+
+static void check_cpu_itimer(struct task_struct *tsk, struct cpu_itimer *it,
+			     cputime_t *expires, cputime_t cur_time, int signo)
+{
+	if (cputime_eq(it->expires, cputime_zero))
+		return;
+
+	if (cputime_ge(cur_time, it->expires)) {
+		if (!cputime_eq(it->incr, cputime_zero)) {
+			it->expires = cputime_add(it->expires, it->incr);
+			it->error += it->incr_error;
+			if (it->error >= onecputick) {
+				it->expires = cputime_sub(it->expires,
+							  cputime_one_jiffy);
+				it->error -= onecputick;
+			}
+		} else
+			it->expires = cputime_zero;
+
+		__group_send_sig_info(signo, SEND_SIG_PRIV, tsk);
+	}
+
+	if (!cputime_eq(it->expires, cputime_zero) &&
+	    (cputime_eq(*expires, cputime_zero) ||
+	     cputime_lt(it->expires, *expires))) {
+		*expires = it->expires;
+	}
+}
+
 /*
  * Check for any per-thread CPU timers that have fired and move them
  * off the tsk->*_timers list onto the firing list.  Per-thread timers
@@ -1090,10 +1122,10 @@ static void check_process_timers(struct task_struct *tsk,
 	 * Don't sample the current process CPU clocks if there are no timers.
 	 */
 	if (list_empty(&timers[CPUCLOCK_PROF]) &&
-	    cputime_eq(sig->it_prof_expires, cputime_zero) &&
+	    cputime_eq(sig->it[CPUCLOCK_PROF].expires, cputime_zero) &&
 	    sig->rlim[RLIMIT_CPU].rlim_cur == RLIM_INFINITY &&
 	    list_empty(&timers[CPUCLOCK_VIRT]) &&
-	    cputime_eq(sig->it_virt_expires, cputime_zero) &&
+	    cputime_eq(sig->it[CPUCLOCK_VIRT].expires, cputime_zero) &&
 	    list_empty(&timers[CPUCLOCK_SCHED])) {
 		stop_process_timers(tsk);
 		return;
@@ -1153,38 +1185,11 @@ static void check_process_timers(struct task_struct *tsk,
 	/*
 	 * Check for the special case process timers.
 	 */
-	if (!cputime_eq(sig->it_prof_expires, cputime_zero)) {
-		if (cputime_ge(ptime, sig->it_prof_expires)) {
-			/* ITIMER_PROF fires and reloads. */
-			sig->it_prof_expires = sig->it_prof_incr;
-			if (!cputime_eq(sig->it_prof_expires, cputime_zero)) {
-				sig->it_prof_expires = cputime_add(
-					sig->it_prof_expires, ptime);
-			}
-			__group_send_sig_info(SIGPROF, SEND_SIG_PRIV, tsk);
-		}
-		if (!cputime_eq(sig->it_prof_expires, cputime_zero) &&
-		    (cputime_eq(prof_expires, cputime_zero) ||
-		     cputime_lt(sig->it_prof_expires, prof_expires))) {
-			prof_expires = sig->it_prof_expires;
-		}
-	}
-	if (!cputime_eq(sig->it_virt_expires, cputime_zero)) {
-		if (cputime_ge(utime, sig->it_virt_expires)) {
-			/* ITIMER_VIRTUAL fires and reloads. */
-			sig->it_virt_expires = sig->it_virt_incr;
-			if (!cputime_eq(sig->it_virt_expires, cputime_zero)) {
-				sig->it_virt_expires = cputime_add(
-					sig->it_virt_expires, utime);
-			}
-			__group_send_sig_info(SIGVTALRM, SEND_SIG_PRIV, tsk);
-		}
-		if (!cputime_eq(sig->it_virt_expires, cputime_zero) &&
-		    (cputime_eq(virt_expires, cputime_zero) ||
-		     cputime_lt(sig->it_virt_expires, virt_expires))) {
-			virt_expires = sig->it_virt_expires;
-		}
-	}
+	check_cpu_itimer(tsk, &sig->it[CPUCLOCK_PROF], &prof_expires, ptime,
+			 SIGPROF);
+	check_cpu_itimer(tsk, &sig->it[CPUCLOCK_VIRT], &virt_expires, utime,
+			 SIGVTALRM);
 	if (sig->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) {
 		unsigned long psecs = cputime_to_secs(ptime);
 		cputime_t x;
@@ -1457,7 +1462,7 @@ void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
 		if (!cputime_eq(*oldval, cputime_zero)) {
 			if (cputime_le(*oldval, now.cpu)) {
 				/* Just about to fire. */
-				*oldval = jiffies_to_cputime(1);
+				*oldval = cputime_one_jiffy;
 			} else {
 				*oldval = cputime_sub(*oldval, now.cpu);
 			}
@@ -1703,10 +1708,15 @@ static __init int init_posix_cpu_timers(void)
 		.nsleep = thread_cpu_nsleep,
 		.nsleep_restart = thread_cpu_nsleep_restart,
 	};
+	struct timespec ts;
 
 	register_posix_clock(CLOCK_PROCESS_CPUTIME_ID, &process);
 	register_posix_clock(CLOCK_THREAD_CPUTIME_ID, &thread);
 
+	cputime_to_timespec(cputime_one_jiffy, &ts);
+	onecputick = ts.tv_nsec;
+	WARN_ON(ts.tv_sec != 0);
+
 	return 0;
 }
 __initcall(init_posix_cpu_timers);
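check_cpu_itimer() consumes the stored error Bresenham-style: every reload adds incr to expires and incr_error to error, and once error reaches one tick (onecputick, derived from cputime_one_jiffy in init_posix_cpu_timers() above), one jiffy is clipped from the next expiry so the rounding never accumulates. Continuing the illustrative HZ=250 numbers from the earlier sketch (a 10 ms interval stored as 3 jiffies, 2 ms of error per period):

#include <stdio.h>

#define NSEC_PER_JIFFY 4000000ULL	/* onecputick for HZ=250, illustrative */

int main(void)
{
	unsigned long long incr = 3;			/* it->incr, jiffies */
	unsigned long long incr_error = 2000000;	/* it->incr_error, ns */
	unsigned long long expires = 0, error = 0;

	for (int period = 1; period <= 5; period++) {
		/* it->expires = cputime_add(it->expires, it->incr); */
		expires += incr;
		error += incr_error;
		if (error >= NSEC_PER_JIFFY) {		/* it->error >= onecputick */
			expires -= 1;			/* minus cputime_one_jiffy */
			error -= NSEC_PER_JIFFY;
		}
		printf("period %d: next expiry at %llu jiffies (error %llu ns)\n",
		       period, expires, error);
	}
	return 0;
}

After five periods the naive reload would sit at 15 jiffies (60 ms) for 50 ms of requested time; the compensated expiry lands at 13 jiffies (52 ms), within one tick of exact.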
@@ -5031,17 +5031,16 @@ void account_idle_time(cputime_t cputime)
  */
 void account_process_tick(struct task_struct *p, int user_tick)
 {
-	cputime_t one_jiffy = jiffies_to_cputime(1);
-	cputime_t one_jiffy_scaled = cputime_to_scaled(one_jiffy);
+	cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy);
 	struct rq *rq = this_rq();
 
 	if (user_tick)
-		account_user_time(p, one_jiffy, one_jiffy_scaled);
+		account_user_time(p, cputime_one_jiffy, one_jiffy_scaled);
 	else if ((p != rq->idle) || (irq_count() != HARDIRQ_OFFSET))
-		account_system_time(p, HARDIRQ_OFFSET, one_jiffy,
+		account_system_time(p, HARDIRQ_OFFSET, cputime_one_jiffy,
 				    one_jiffy_scaled);
 	else
-		account_idle_time(one_jiffy);
+		account_idle_time(cputime_one_jiffy);
 }
 
 /*
...