Commit 381760ea authored by Mel Gorman, committed by Linus Torvalds

mm: micro-optimise slab to avoid a function call

Getting and putting objects in SLAB currently requires a function call,
but the bulk of the work is related to PFMEMALLOC reserves, which are
consumed only when network-backed storage is critical.  Use an inline
function to determine whether the function call is required.

Signed-off-by: Mel Gorman <mgorman@suse.de>
Cc: David Miller <davem@davemloft.net>
Cc: Neil Brown <neilb@suse.de>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Christie <michaelc@cs.wisc.edu>
Cc: Eric B Munson <emunson@mgebm.net>
Cc: Eric Dumazet <eric.dumazet@gmail.com>
Cc: Sebastian Andrzej Siewior <sebastian@breakpoint.cc>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Christoph Lameter <cl@linux.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent b4b9e355
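
The pattern, as a minimal self-contained C sketch (hypothetical names
throughout: obj_cache, cache_get, slow_path_needed are stand-ins, not
the kernel code; the actual change is in the diff below). The wrapper
stays inline so the common case costs one predictable branch, and the
out-of-line function is entered only when the rare condition holds.

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the kernel's array_cache. */
struct obj_cache {
        int avail;
        void *entry[32];
};

/* Stand-in for sk_memalloc_socks(): rarely true. */
static bool slow_path_needed;

/* Out-of-line slow path; the kernel's __ac_get_obj() would do the
 * PFMEMALLOC-aware object selection here. */
static void *cache_get_slow(struct obj_cache *c)
{
        return c->entry[--c->avail];
}

/* Inline fast path: no function call in the common case. */
static inline void *cache_get(struct obj_cache *c)
{
        if (slow_path_needed)
                return cache_get_slow(c);
        return c->entry[--c->avail];
}

int main(void)
{
        struct obj_cache c = { .avail = 1, .entry = { "obj0" } };
        printf("%s\n", (char *)cache_get(&c));
        return 0;
}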
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -118,6 +118,8 @@
 #include <linux/memory.h>
 #include <linux/prefetch.h>
 
+#include <net/sock.h>
+
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
 #include <asm/page.h>
@@ -965,7 +967,7 @@ static void recheck_pfmemalloc_active(struct kmem_cache *cachep,
         spin_unlock_irqrestore(&l3->list_lock, flags);
 }
 
-static void *ac_get_obj(struct kmem_cache *cachep, struct array_cache *ac,
+static void *__ac_get_obj(struct kmem_cache *cachep, struct array_cache *ac,
                                                 gfp_t flags, bool force_refill)
 {
         int i;
@@ -1012,7 +1014,20 @@ static void *ac_get_obj(struct kmem_cache *cachep, struct array_cache *ac,
         return objp;
 }
 
-static void ac_put_obj(struct kmem_cache *cachep, struct array_cache *ac,
+static inline void *ac_get_obj(struct kmem_cache *cachep,
+                        struct array_cache *ac, gfp_t flags, bool force_refill)
+{
+        void *objp;
+
+        if (unlikely(sk_memalloc_socks()))
+                objp = __ac_get_obj(cachep, ac, flags, force_refill);
+        else
+                objp = ac->entry[--ac->avail];
+
+        return objp;
+}
+
+static void *__ac_put_obj(struct kmem_cache *cachep, struct array_cache *ac,
                                                                 void *objp)
 {
         if (unlikely(pfmemalloc_active)) {
@@ -1022,6 +1037,15 @@ static void ac_put_obj(struct kmem_cache *cachep, struct array_cache *ac,
                 set_obj_pfmemalloc(&objp);
         }
 
+        return objp;
+}
+
+static inline void ac_put_obj(struct kmem_cache *cachep, struct array_cache *ac,
+                                                                void *objp)
+{
+        if (unlikely(sk_memalloc_socks()))
+                objp = __ac_put_obj(cachep, ac, objp);
+
         ac->entry[ac->avail++] = objp;
 }
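
A note on the gate: in kernel trees of this vintage, sk_memalloc_socks()
is itself a static inline in include/net/sock.h built on a static key
(jump label), roughly:

extern struct static_key memalloc_socks;

static inline int sk_memalloc_socks(void)
{
        return static_key_false(&memalloc_socks);
}

So while no sockets are using PFMEMALLOC reserves, the test compiles
down to a patched no-op branch, and ac_get_obj()/ac_put_obj() complete
entirely inline without calling __ac_get_obj()/__ac_put_obj().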