Commit 2a904905 authored by Vlastimil Babka

mm, slub: extract get_partial() from new_slab_objects()

The later patches will need more fine-grained control over individual actions
in ___slab_alloc(), the only caller of new_slab_objects(), so this is a first
preparatory step with no functional change.

This adds a goto label that appears unnecessary at this point, but will be
useful for later changes.
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: Christoph Lameter <cl@linux.com>
parent 976b805c
@@ -2613,17 +2613,12 @@ slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid)
 static inline void *new_slab_objects(struct kmem_cache *s, gfp_t flags,
                        int node, struct kmem_cache_cpu **pc)
 {
-       void *freelist;
+       void *freelist = NULL;
        struct kmem_cache_cpu *c = *pc;
        struct page *page;
 
        WARN_ON_ONCE(s->ctor && (flags & __GFP_ZERO));
 
-       freelist = get_partial(s, flags, node, c);
-
-       if (freelist)
-               return freelist;
-
        page = new_slab(s, flags, node);
        if (page) {
                c = raw_cpu_ptr(s->cpu_slab);
@@ -2787,6 +2782,10 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
                goto redo;
        }
 
+       freelist = get_partial(s, gfpflags, node, c);
+       if (freelist)
+               goto check_new_page;
+
        freelist = new_slab_objects(s, gfpflags, node, &c);
 
        if (unlikely(!freelist)) {
@@ -2794,6 +2793,7 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
                return NULL;
        }
 
+check_new_page:
        page = c->page;
        if (likely(!kmem_cache_debug(s) && pfmemalloc_match(page, gfpflags)))
                goto load_freelist;
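
To make the resulting control flow easier to follow, below is a minimal standalone sketch of the slow-path ordering that ___slab_alloc() has after this patch. The stub functions (get_partial_stub(), new_slab_objects_stub()) and the wrapper are hypothetical illustrations for this page, not kernel code; they only model the call order shown in the diff above: try the partial lists via get_partial() first, fall back to new_slab_objects() for a freshly allocated slab, and let both paths converge at the check_new_page label.

#include <stdio.h>
#include <stddef.h>

/* Stands in for get_partial(): returns a freelist from a partial slab, or NULL. */
static void *get_partial_stub(int have_partial_slab)
{
        return have_partial_slab ? (void *)0x1 : NULL;
}

/* Stands in for new_slab_objects(), which after this patch only allocates a new slab. */
static void *new_slab_objects_stub(void)
{
        return (void *)0x2;
}

/* Models the reordered slow path of ___slab_alloc() after this patch. */
static void *slab_alloc_slowpath_sketch(int have_partial_slab)
{
        void *freelist;

        /* ___slab_alloc() now calls get_partial() directly ... */
        freelist = get_partial_stub(have_partial_slab);
        if (freelist)
                goto check_new_page;

        /* ... and only falls back to allocating a brand new slab. */
        freelist = new_slab_objects_stub();
        if (!freelist)
                return NULL;

check_new_page:
        /* In the kernel, the debug and pfmemalloc checks on c->page happen here. */
        return freelist;
}

int main(void)
{
        printf("partial slab available -> freelist %p\n",
               slab_alloc_slowpath_sketch(1));
        printf("no partial slab        -> freelist %p\n",
               slab_alloc_slowpath_sketch(0));
        return 0;
}

As the commit message notes, the check_new_page label looks unnecessary at this point; it is kept because later patches in the series build on it.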