Commit 346ed9c1 authored by Dimitri Sivanich, committed by Linus Torvalds

[PATCH] Move cache_reap out of timer context

I'm submitting two patches associated with moving cache_reap functionality
out of timer context.  Note that these patches do not make any further
optimizations to cache_reap at this time.

The first patch adds a function similar to schedule_delayed_work to allow
work to be scheduled on another CPU.

The second patch makes use of schedule_delayed_work_on to schedule
cache_reap to run from keventd.
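
For illustration only (not part of either patch), a minimal sketch of the
resulting pattern, using the 2.6-era three-argument INIT_WORK(): a per-CPU
work item is kicked off once on its target CPU and then re-queues itself
from keventd.  The REAP_INTERVAL constant, the start_cpu_reap() name and
the elided bodies below are stand-ins; the real constants and reclaim
logic live in mm/slab.c.

#include <linux/percpu.h>
#include <linux/smp.h>
#include <linux/workqueue.h>

/* Stand-in for the reap interval constant defined in mm/slab.c. */
#define REAP_INTERVAL	(2*HZ)

static DEFINE_PER_CPU(struct work_struct, reap_work);

/* Runs from keventd; queues its own next iteration on the local CPU. */
static void cache_reap(void *unused)
{
	/* ... per-CPU slab reclaim elided ... */

	/* Setup the next iteration. */
	schedule_delayed_work(&__get_cpu_var(reap_work),
				REAP_INTERVAL + smp_processor_id());
}

/* Called once per CPU, after keventd is up, to start the cycle. */
static void start_cpu_reap(int cpu)
{
	struct work_struct *reap_work = &per_cpu(reap_work, cpu);

	if (keventd_up() && reap_work->func == NULL) {
		INIT_WORK(reap_work, cache_reap, NULL);
		schedule_delayed_work_on(cpu, reap_work, HZ + 3 * cpu);
	}
}

The keventd_up() test guards against queueing work before keventd exists,
and the HZ + 3 * cpu initial delay staggers the per-CPU work so the CPUs
do not fall into lockstep contending for the global cache chain lock, as
the comment in the slab.c hunk below notes.
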
Signed-off-by: Dimitri Sivanich <sivanich@sgi.com>
Signed-off-by: Manfred Spraul <manfred@colorfullife.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 6cfa4c9f
include/linux/workqueue.h

@@ -63,6 +63,8 @@ extern void FASTCALL(flush_workqueue(struct workqueue_struct *wq));
 
 extern int FASTCALL(schedule_work(struct work_struct *work));
 extern int FASTCALL(schedule_delayed_work(struct work_struct *work, unsigned long delay));
+extern int schedule_delayed_work_on(int cpu, struct work_struct *work,
+				unsigned long delay);
 extern void flush_scheduled_work(void);
 extern int current_is_keventd(void);
 extern int keventd_up(void);
kernel/workqueue.c

@@ -398,6 +398,26 @@ int fastcall schedule_delayed_work(struct work_struct *work, unsigned long delay
 	return queue_delayed_work(keventd_wq, work, delay);
 }
 
+int schedule_delayed_work_on(int cpu,
+			struct work_struct *work, unsigned long delay)
+{
+	int ret = 0;
+	struct timer_list *timer = &work->timer;
+
+	if (!test_and_set_bit(0, &work->pending)) {
+		BUG_ON(timer_pending(timer));
+		BUG_ON(!list_empty(&work->entry));
+		/* This stores keventd_wq for the moment, for the timer_fn */
+		work->wq_data = keventd_wq;
+		timer->expires = jiffies + delay;
+		timer->data = (unsigned long)work;
+		timer->function = delayed_work_timer_fn;
+		add_timer_on(timer, cpu);
+		ret = 1;
+	}
+	return ret;
+}
+
 void flush_scheduled_work(void)
 {
 	flush_workqueue(keventd_wq);
mm/slab.c

@@ -519,11 +519,11 @@ enum {
 	FULL
 } g_cpucache_up;
 
-static DEFINE_PER_CPU(struct timer_list, reap_timers);
+static DEFINE_PER_CPU(struct work_struct, reap_work);
 
-static void reap_timer_fnc(unsigned long data);
 static void free_block(kmem_cache_t* cachep, void** objpp, int len);
 static void enable_cpucache (kmem_cache_t *cachep);
+static void cache_reap (void *unused);
 
 static inline void ** ac_entry(struct array_cache *ac)
 {
@@ -573,35 +573,26 @@ static void __slab_error(const char *function, kmem_cache_t *cachep, char *msg)
 }
 
 /*
- * Start the reap timer running on the target CPU.  We run at around 1 to 2Hz.
- * Add the CPU number into the expiry time to minimize the possibility of the
- * CPUs getting into lockstep and contending for the global cache chain lock.
+ * Initiate the reap timer running on the target CPU.  We run at around 1 to 2Hz
+ * via the workqueue/eventd.
+ * Add the CPU number into the expiration time to minimize the possibility of
+ * the CPUs getting into lockstep and contending for the global cache chain
+ * lock.
  */
 static void __devinit start_cpu_timer(int cpu)
 {
-	struct timer_list *rt = &per_cpu(reap_timers, cpu);
+	struct work_struct *reap_work = &per_cpu(reap_work, cpu);
 
-	if (rt->function == NULL) {
-		init_timer(rt);
-		rt->expires = jiffies + HZ + 3*cpu;
-		rt->data = cpu;
-		rt->function = reap_timer_fnc;
-		add_timer_on(rt, cpu);
-	}
-}
-
-#ifdef CONFIG_HOTPLUG_CPU
-static void stop_cpu_timer(int cpu)
-{
-	struct timer_list *rt = &per_cpu(reap_timers, cpu);
-
-	if (rt->function) {
-		del_timer_sync(rt);
-		WARN_ON(timer_pending(rt));
-		rt->function = NULL;
+	/*
+	 * When this gets called from do_initcalls via cpucache_init(),
+	 * init_workqueues() has already run, so keventd will be setup
+	 * at that time.
+	 */
+	if (keventd_up() && reap_work->func == NULL) {
+		INIT_WORK(reap_work, cache_reap, NULL);
+		schedule_delayed_work_on(cpu, reap_work, HZ + 3 * cpu);
 	}
 }
-#endif
 
 static struct array_cache *alloc_arraycache(int cpu, int entries, int batchcount)
 {
@@ -654,7 +645,6 @@ static int __devinit cpuup_callback(struct notifier_block *nfb,
 		break;
 #ifdef CONFIG_HOTPLUG_CPU
 	case CPU_DEAD:
-		stop_cpu_timer(cpu);
 		/* fall thru */
 	case CPU_UP_CANCELED:
 		down(&cache_chain_sem);
@@ -2674,24 +2664,23 @@ static void drain_array_locked(kmem_cache_t *cachep,
 /**
  * cache_reap - Reclaim memory from caches.
  *
- * Called from a timer, every few seconds
+ * Called from workqueue/eventd every few seconds.
  * Purpose:
  * - clear the per-cpu caches for this CPU.
  * - return freeable pages to the main free memory pool.
  *
  * If we cannot acquire the cache chain semaphore then just give up - we'll
- * try again next timer interrupt.
+ * try again on the next iteration.
  */
-static void cache_reap (void)
+static void cache_reap(void *unused)
 {
 	struct list_head *walk;
 
-#if DEBUG
-	BUG_ON(!in_interrupt());
-	BUG_ON(in_irq());
-#endif
-	if (down_trylock(&cache_chain_sem))
+	if (down_trylock(&cache_chain_sem)) {
+		/* Give up. Setup the next iteration. */
+		schedule_delayed_work(&__get_cpu_var(reap_work), REAPTIMEOUT_CPUC + smp_processor_id());
 		return;
+	}
 
 	list_for_each(walk, &cache_chain) {
 		kmem_cache_t *searchp;
@@ -2755,22 +2744,8 @@ static void cache_reap (void)
 	}
 	check_irq_on();
 	up(&cache_chain_sem);
+	/* Setup the next iteration */
+	schedule_delayed_work(&__get_cpu_var(reap_work), REAPTIMEOUT_CPUC + smp_processor_id());
 }
-
-/*
- * This is a timer handler.  There is one per CPU.  It is called periodially
- * to shrink this CPU's caches.  Otherwise there could be memory tied up
- * for long periods (or for ever) due to load changes.
- */
-static void reap_timer_fnc(unsigned long cpu)
-{
-	struct timer_list *rt = &__get_cpu_var(reap_timers);
-
-	/* CPU hotplug can drag us off cpu: don't run on wrong CPU */
-	if (!cpu_is_offline(cpu)) {
-		cache_reap();
-		mod_timer(rt, jiffies + REAPTIMEOUT_CPUC + cpu);
-	}
-}
 
 #ifdef CONFIG_PROC_FS