Commit 25c4f304 authored by Joonsoo Kim's avatar Joonsoo Kim Committed by Linus Torvalds

mm/slab: factor out unlikely part of cache_free_alien()

cache_free_alien() is a rarely used function, called only on a node
mismatch.  But it is defined with the inline attribute, so it is inlined
into __cache_free(), which is the core free function of the slab
allocator.  This uselessly makes the kmem_cache_free()/kfree() functions
large.  What we really need to inline is just the node-match check, so
this patch factors out the other parts of cache_free_alien() to reduce
the code size of kmem_cache_free()/kfree().

<Before>
nm -S mm/slab.o | grep -e "T kfree" -e "T kmem_cache_free"
00000000000011e0 0000000000000228 T kfree
0000000000000670 0000000000000216 T kmem_cache_free

<After>
nm -S mm/slab.o | grep -e "T kfree" -e "T kmem_cache_free"
0000000000001110 00000000000001b5 T kfree
0000000000000750 0000000000000181 T kmem_cache_free

You can see the slightly reduced text size: 0x228->0x1b5, 0x216->0x181.
Signed-off-by: default avatarJoonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Zhang Yanfei <zhangyanfei@cn.fujitsu.com>
Signed-off-by: default avatarAndrew Morton <akpm@linux-foundation.org>
Signed-off-by: default avatarLinus Torvalds <torvalds@linux-foundation.org>
parent d3aec344
...@@ -984,46 +984,50 @@ static void drain_alien_cache(struct kmem_cache *cachep, ...@@ -984,46 +984,50 @@ static void drain_alien_cache(struct kmem_cache *cachep,
} }
} }
static inline int cache_free_alien(struct kmem_cache *cachep, void *objp) static int __cache_free_alien(struct kmem_cache *cachep, void *objp,
int node, int page_node)
{ {
int nodeid = page_to_nid(virt_to_page(objp));
struct kmem_cache_node *n; struct kmem_cache_node *n;
struct alien_cache *alien = NULL; struct alien_cache *alien = NULL;
struct array_cache *ac; struct array_cache *ac;
int node;
LIST_HEAD(list); LIST_HEAD(list);
node = numa_mem_id();
/*
* Make sure we are not freeing a object from another node to the array
* cache on this cpu.
*/
if (likely(nodeid == node))
return 0;
n = get_node(cachep, node); n = get_node(cachep, node);
STATS_INC_NODEFREES(cachep); STATS_INC_NODEFREES(cachep);
if (n->alien && n->alien[nodeid]) { if (n->alien && n->alien[page_node]) {
alien = n->alien[nodeid]; alien = n->alien[page_node];
ac = &alien->ac; ac = &alien->ac;
spin_lock(&alien->lock); spin_lock(&alien->lock);
if (unlikely(ac->avail == ac->limit)) { if (unlikely(ac->avail == ac->limit)) {
STATS_INC_ACOVERFLOW(cachep); STATS_INC_ACOVERFLOW(cachep);
__drain_alien_cache(cachep, ac, nodeid, &list); __drain_alien_cache(cachep, ac, page_node, &list);
} }
ac_put_obj(cachep, ac, objp); ac_put_obj(cachep, ac, objp);
spin_unlock(&alien->lock); spin_unlock(&alien->lock);
slabs_destroy(cachep, &list); slabs_destroy(cachep, &list);
} else { } else {
n = get_node(cachep, nodeid); n = get_node(cachep, page_node);
spin_lock(&n->list_lock); spin_lock(&n->list_lock);
free_block(cachep, &objp, 1, nodeid, &list); free_block(cachep, &objp, 1, page_node, &list);
spin_unlock(&n->list_lock); spin_unlock(&n->list_lock);
slabs_destroy(cachep, &list); slabs_destroy(cachep, &list);
} }
return 1; return 1;
} }
/*
 * Fast-path check for freeing to a remote node.  Only the node-match
 * comparison is inlined into __cache_free(); the unlikely remote-free
 * work is delegated to the out-of-line __cache_free_alien().
 *
 * Returns 0 when the object belongs to the local node (caller frees it
 * to the per-cpu array cache), 1 when it was handled as an alien free.
 */
static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
{
	int page_node = page_to_nid(virt_to_page(objp));
	int node = numa_mem_id();
	/*
	 * Make sure we are not freeing a object from another node to the array
	 * cache on this cpu.
	 */
	if (likely(node == page_node))
		return 0;

	return __cache_free_alien(cachep, objp, node, page_node);
}
#endif #endif
/* /*
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment