Commit 90608a29 authored by Aegis Lin's avatar Aegis Lin Committed by Paul Mackerras

[POWERPC] spufs: Use separate timer for /proc/spu_loadavg calculation

The original spusched_timer was designed to take effect only when
a context is waiting in the runqueue.

This change adds an additional lower-frequency timer dedicated to
handling the spu_load updates. The new timer is triggered every
LOAD_FREQ ticks.
Signed-off-by: default avatarAegis Lin <aegislin@gmail.com>
Signed-off-by: default avatarArnd Bergmann <arnd@arndb.de>
Signed-off-by: default avatarJeremy Kerr <jk@ozlabs.org>
Signed-off-by: default avatarPaul Mackerras <paulus@samba.org>
parent c9101bdb
...@@ -58,6 +58,7 @@ static unsigned long spu_avenrun[3]; ...@@ -58,6 +58,7 @@ static unsigned long spu_avenrun[3];
static struct spu_prio_array *spu_prio; static struct spu_prio_array *spu_prio;
static struct task_struct *spusched_task; static struct task_struct *spusched_task;
static struct timer_list spusched_timer; static struct timer_list spusched_timer;
static struct timer_list spuloadavg_timer;
/* /*
* Priority of a normal, non-rt, non-niced'd process (aka nice level 0). * Priority of a normal, non-rt, non-niced'd process (aka nice level 0).
...@@ -922,35 +923,31 @@ static unsigned long count_active_contexts(void) ...@@ -922,35 +923,31 @@ static unsigned long count_active_contexts(void)
} }
/** /**
* spu_calc_load - given tick count, update the avenrun load estimates. * spu_calc_load - update the avenrun load estimates.
* @tick: tick count
* *
* No locking against reading these values from userspace, as for * No locking against reading these values from userspace, as for
* the CPU loadavg code. * the CPU loadavg code.
*/ */
static void spu_calc_load(unsigned long ticks) static void spu_calc_load(void)
{ {
unsigned long active_tasks; /* fixed-point */ unsigned long active_tasks; /* fixed-point */
static int count = LOAD_FREQ;
active_tasks = count_active_contexts() * FIXED_1;
count -= ticks; CALC_LOAD(spu_avenrun[0], EXP_1, active_tasks);
CALC_LOAD(spu_avenrun[1], EXP_5, active_tasks);
if (unlikely(count < 0)) { CALC_LOAD(spu_avenrun[2], EXP_15, active_tasks);
active_tasks = count_active_contexts() * FIXED_1;
do {
CALC_LOAD(spu_avenrun[0], EXP_1, active_tasks);
CALC_LOAD(spu_avenrun[1], EXP_5, active_tasks);
CALC_LOAD(spu_avenrun[2], EXP_15, active_tasks);
count += LOAD_FREQ;
} while (count < 0);
}
} }
static void spusched_wake(unsigned long data) static void spusched_wake(unsigned long data)
{ {
mod_timer(&spusched_timer, jiffies + SPUSCHED_TICK); mod_timer(&spusched_timer, jiffies + SPUSCHED_TICK);
wake_up_process(spusched_task); wake_up_process(spusched_task);
spu_calc_load(SPUSCHED_TICK); }
static void spuloadavg_wake(unsigned long data)
{
mod_timer(&spuloadavg_timer, jiffies + LOAD_FREQ);
spu_calc_load();
} }
static int spusched_thread(void *unused) static int spusched_thread(void *unused)
...@@ -1068,6 +1065,7 @@ int __init spu_sched_init(void) ...@@ -1068,6 +1065,7 @@ int __init spu_sched_init(void)
spin_lock_init(&spu_prio->runq_lock); spin_lock_init(&spu_prio->runq_lock);
setup_timer(&spusched_timer, spusched_wake, 0); setup_timer(&spusched_timer, spusched_wake, 0);
setup_timer(&spuloadavg_timer, spuloadavg_wake, 0);
spusched_task = kthread_run(spusched_thread, NULL, "spusched"); spusched_task = kthread_run(spusched_thread, NULL, "spusched");
if (IS_ERR(spusched_task)) { if (IS_ERR(spusched_task)) {
...@@ -1075,6 +1073,8 @@ int __init spu_sched_init(void) ...@@ -1075,6 +1073,8 @@ int __init spu_sched_init(void)
goto out_free_spu_prio; goto out_free_spu_prio;
} }
mod_timer(&spuloadavg_timer, 0);
entry = create_proc_entry("spu_loadavg", 0, NULL); entry = create_proc_entry("spu_loadavg", 0, NULL);
if (!entry) if (!entry)
goto out_stop_kthread; goto out_stop_kthread;
...@@ -1100,6 +1100,7 @@ void spu_sched_exit(void) ...@@ -1100,6 +1100,7 @@ void spu_sched_exit(void)
remove_proc_entry("spu_loadavg", NULL); remove_proc_entry("spu_loadavg", NULL);
del_timer_sync(&spusched_timer); del_timer_sync(&spusched_timer);
del_timer_sync(&spuloadavg_timer);
kthread_stop(spusched_task); kthread_stop(spusched_task);
for (node = 0; node < MAX_NUMNODES; node++) { for (node = 0; node < MAX_NUMNODES; node++) {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment