Commit a5aa63a5 authored by Joonsoo Kim, committed by Linus Torvalds

mm/slab: drain the free slab as much as possible

slabs_tofree() implies freeing all free slabs.  We can do the same by
simply passing INT_MAX.
Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Acked-by: Christoph Lameter <cl@linux.com>
Cc: Jesper Dangaard Brouer <brouer@redhat.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 8888177e
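
For context, here is a minimal userspace sketch of why the change is safe. This is not the mm/slab.c code itself; struct node_model, drain_freelist_model() and slabs_tofree_model() are simplified stand-ins. The point it models: drain_freelist() stops as soon as the node's free list runs out, so its third argument is only an upper bound on how many free slabs to release. The removed slabs_tofree() computed one valid upper bound, ceil(free_objects / objects-per-slab); INT_MAX is another, and it spares the arithmetic.

#include <limits.h>
#include <stdio.h>

/* Simplified stand-in for struct kmem_cache_node's free-slab bookkeeping. */
struct node_model {
	int free_slabs;			/* slabs currently on the free list */
};

/* Models drain_freelist(): free at most 'tofree' slabs, stop when none remain. */
static int drain_freelist_model(struct node_model *n, int tofree)
{
	int nr_freed = 0;

	while (nr_freed < tofree && n->free_slabs > 0) {
		n->free_slabs--;	/* release one empty slab */
		nr_freed++;
	}
	return nr_freed;
}

/* What the removed helper computed: ceil(free_objects / objs_per_slab). */
static int slabs_tofree_model(int free_objects, int objs_per_slab)
{
	return (free_objects + objs_per_slab - 1) / objs_per_slab;
}

int main(void)
{
	struct node_model a = { .free_slabs = 5 };
	struct node_model b = { .free_slabs = 5 };

	/* Both caps drain the list completely; INT_MAX just skips the math. */
	printf("exact cap: freed %d\n",
	       drain_freelist_model(&a, slabs_tofree_model(130, 32)));
	printf("INT_MAX:   freed %d\n", drain_freelist_model(&b, INT_MAX));
	return 0;
}

Both calls print "freed 5": the exact cap and INT_MAX drain the same number of slabs, which is why every call site below can simply pass INT_MAX.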
@@ -895,12 +895,6 @@ static int init_cache_node_node(int node)
 	return 0;
 }
 
-static inline int slabs_tofree(struct kmem_cache *cachep,
-				struct kmem_cache_node *n)
-{
-	return (n->free_objects + cachep->num - 1) / cachep->num;
-}
-
 static void cpuup_canceled(long cpu)
 {
 	struct kmem_cache *cachep;
@@ -965,7 +959,7 @@ static void cpuup_canceled(long cpu)
 		n = get_node(cachep, node);
 		if (!n)
 			continue;
-		drain_freelist(cachep, n, slabs_tofree(cachep, n));
+		drain_freelist(cachep, n, INT_MAX);
 	}
 }
@@ -1117,7 +1111,7 @@ static int __meminit drain_cache_node_node(int node)
 		if (!n)
 			continue;
 
-		drain_freelist(cachep, n, slabs_tofree(cachep, n));
+		drain_freelist(cachep, n, INT_MAX);
 
 		if (!list_empty(&n->slabs_full) ||
 		    !list_empty(&n->slabs_partial)) {
@@ -2311,7 +2305,7 @@ int __kmem_cache_shrink(struct kmem_cache *cachep, bool deactivate)
 	check_irq_on();
 	for_each_kmem_cache_node(cachep, node, n) {
-		drain_freelist(cachep, n, slabs_tofree(cachep, n));
+		drain_freelist(cachep, n, INT_MAX);
 
 		ret += !list_empty(&n->slabs_full) ||
 			!list_empty(&n->slabs_partial);