Commit 1b3fa04f authored by Andrew Morton's avatar Andrew Morton Committed by Linus Torvalds

[PATCH] kmem_cache_destroy() forgets to drain all objects

From: Philippe Elie <phil.el@wanadoo.fr>

kmem_cache_destroy() can fail with the following error: slab error in
kmem_cache_destroy(): cache `xxx': Can't free all objects — even though the
cache user really freed all objects.

This is because drain_array_locked() only frees 80% of the objects.

Fix it by adding a parameter to drain_array_locked() telling it to drain
100% of the objects.
parent 9150892e
...@@ -1207,7 +1207,8 @@ static void smp_call_function_all_cpus(void (*func) (void *arg), void *arg) ...@@ -1207,7 +1207,8 @@ static void smp_call_function_all_cpus(void (*func) (void *arg), void *arg)
} }
static void free_block (kmem_cache_t* cachep, void** objpp, int len); static void free_block (kmem_cache_t* cachep, void** objpp, int len);
static void drain_array_locked(kmem_cache_t* cachep, struct array_cache *ac); static void drain_array_locked(kmem_cache_t* cachep,
struct array_cache *ac, int force);
static void do_drain(void *arg) static void do_drain(void *arg)
{ {
...@@ -1228,7 +1229,7 @@ static void drain_cpu_caches(kmem_cache_t *cachep) ...@@ -1228,7 +1229,7 @@ static void drain_cpu_caches(kmem_cache_t *cachep)
check_irq_on(); check_irq_on();
spin_lock_irq(&cachep->spinlock); spin_lock_irq(&cachep->spinlock);
if (cachep->lists.shared) if (cachep->lists.shared)
drain_array_locked(cachep, cachep->lists.shared); drain_array_locked(cachep, cachep->lists.shared, 1);
spin_unlock_irq(&cachep->spinlock); spin_unlock_irq(&cachep->spinlock);
} }
...@@ -2267,7 +2268,8 @@ static void drain_array(kmem_cache_t *cachep, struct array_cache *ac) ...@@ -2267,7 +2268,8 @@ static void drain_array(kmem_cache_t *cachep, struct array_cache *ac)
} }
} }
static void drain_array_locked(kmem_cache_t *cachep, struct array_cache *ac) static void drain_array_locked(kmem_cache_t *cachep,
struct array_cache *ac, int force)
{ {
int tofree; int tofree;
...@@ -2275,7 +2277,7 @@ static void drain_array_locked(kmem_cache_t *cachep, struct array_cache *ac) ...@@ -2275,7 +2277,7 @@ static void drain_array_locked(kmem_cache_t *cachep, struct array_cache *ac)
if (ac->touched) { if (ac->touched) {
ac->touched = 0; ac->touched = 0;
} else if (ac->avail) { } else if (ac->avail) {
tofree = (ac->limit+4)/5; tofree = force ? ac->avail : (ac->limit+4)/5;
if (tofree > ac->avail) { if (tofree > ac->avail) {
tofree = (ac->avail+1)/2; tofree = (ac->avail+1)/2;
} }
...@@ -2286,7 +2288,6 @@ static void drain_array_locked(kmem_cache_t *cachep, struct array_cache *ac) ...@@ -2286,7 +2288,6 @@ static void drain_array_locked(kmem_cache_t *cachep, struct array_cache *ac)
} }
} }
/** /**
* cache_reap - Reclaim memory from caches. * cache_reap - Reclaim memory from caches.
* *
...@@ -2334,7 +2335,7 @@ static inline void cache_reap (void) ...@@ -2334,7 +2335,7 @@ static inline void cache_reap (void)
searchp->lists.next_reap = jiffies + REAPTIMEOUT_LIST3; searchp->lists.next_reap = jiffies + REAPTIMEOUT_LIST3;
if (searchp->lists.shared) if (searchp->lists.shared)
drain_array_locked(searchp, searchp->lists.shared); drain_array_locked(searchp, searchp->lists.shared, 0);
if (searchp->lists.free_touched) { if (searchp->lists.free_touched) {
searchp->lists.free_touched = 0; searchp->lists.free_touched = 0;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment