Commit 3b5c86dd authored by Andrew Morton, committed by Linus Torvalds

[PATCH] kmem_cache_destroy fix

Slab currently has a policy of buffering a single spare page per slab
cache.  We're putting that spare page on the partially-full list, which
confuses kmem_cache_destroy().

So put it on cachep->slabs_free, which is where empty pages go.
Parent: ca61a009
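
For context, the policy the fix restores can be modeled in a few lines of
plain C.  This is a minimal stand-alone sketch, not the kernel source:
struct cache, struct slab, slab_now_empty(), and the toy list helpers are
hypothetical stand-ins for kmem_cache_t, slab_t, and the list primitives
in <linux/list.h>, and free() stands in for kmem_slab_destroy().

	/*
	 * Minimal stand-alone model of the "buffer one spare page" policy
	 * this patch moves onto slabs_free.  All names here are
	 * illustrative, not the kernel's.
	 */
	#include <stdio.h>
	#include <stdlib.h>

	struct list_head { struct list_head *next, *prev; };

	static void init_list(struct list_head *h) { h->next = h->prev = h; }
	static int  list_is_empty(const struct list_head *h) { return h->next == h; }

	static void list_add_front(struct list_head *n, struct list_head *h)
	{
		n->next = h->next; n->prev = h;
		h->next->prev = n; h->next = n;
	}

	static void list_remove(struct list_head *n)
	{
		n->prev->next = n->next; n->next->prev = n->prev;
	}

	struct slab  { struct list_head list; int inuse; };
	struct cache { struct list_head slabs_partial, slabs_free; };

	/* Free path once a slab's last object is released: keep at most one
	 * empty slab cached on slabs_free, destroy any further ones. */
	static void slab_now_empty(struct cache *cachep, struct slab *slabp)
	{
		list_remove(&slabp->list);
		if (list_is_empty(&cachep->slabs_free))
			list_add_front(&slabp->list, &cachep->slabs_free);
		else
			free(slabp);	/* stands in for kmem_slab_destroy() */
	}

	int main(void)
	{
		struct cache c;
		struct slab *a = calloc(1, sizeof(*a));
		struct slab *b = calloc(1, sizeof(*b));

		init_list(&c.slabs_partial);
		init_list(&c.slabs_free);
		list_add_front(&a->list, &c.slabs_partial);
		list_add_front(&b->list, &c.slabs_partial);

		slab_now_empty(&c, a);	/* buffered: slabs_free was empty */
		slab_now_empty(&c, b);	/* destroyed: spare already buffered */

		printf("spare buffered: %s\n",
		       list_is_empty(&c.slabs_free) ? "no" : "yes");
		printf("partial list empty: %s\n",
		       list_is_empty(&c.slabs_partial) ? "no" : "yes");
		return 0;
	}

With two slabs going empty in turn, the first is buffered on slabs_free
and the second is destroyed, leaving exactly the state kmem_cache_destroy()
expects: the spare page on slabs_free and nothing idle parked on
slabs_partial.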
@@ -1499,9 +1499,9 @@ static inline void kmem_cache_free_one(kmem_cache_t *cachep, void *objp)
 	if (unlikely(!--slabp->inuse)) {
 		/* Was partial or full, now empty. */
 		list_del(&slabp->list);
-		/* list_add(&slabp->list, &cachep->slabs_free); */
-		if (unlikely(list_empty(&cachep->slabs_partial)))
-			list_add(&slabp->list, &cachep->slabs_partial);
+		/* We only buffer a single page */
+		if (list_empty(&cachep->slabs_free))
+			list_add(&slabp->list, &cachep->slabs_free);
 		else
 			kmem_slab_destroy(cachep, slabp);
 	} else if (unlikely(inuse == cachep->num)) {
@@ -1977,8 +1977,7 @@ static int s_show(struct seq_file *m, void *p)
 	}
 	list_for_each(q,&cachep->slabs_partial) {
 		slabp = list_entry(q, slab_t, list);
-		if (slabp->inuse == cachep->num)
-			BUG();
+		BUG_ON(slabp->inuse == cachep->num || !slabp->inuse);
 		active_objs += slabp->inuse;
 		active_slabs++;
 	}
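
The second hunk tightens the /proc/slabinfo walk in s_show() to match:
once empty slabs are no longer parked on slabs_partial, any slab on that
list must be strictly between empty and full, 0 < inuse < num.  The old
check only caught full slabs there; the new BUG_ON() also catches empty
ones.  As a stand-alone sketch (check_partial_slab() is a hypothetical
helper; assert() plays the role of the kernel's BUG_ON(), which panics
rather than aborting):

	#include <assert.h>

	/* Invariant after this patch for any slab found on
	 * cachep->slabs_partial: partially used means neither empty
	 * nor full. */
	void check_partial_slab(int inuse, int num)
	{
		assert(inuse > 0 && inuse < num);
	}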