Commit 3f2b77e3 authored by Vlastimil Babka

mm, slub: validate slab from partial list or page allocator before making it cpu slab

When we obtain a new slab page from node partial list or page allocator, we
assign it to kmem_cache_cpu, perform some checks, and if they fail, we undo
the assignment.

In order to allow doing the checks without irq disabled, restructure the code
so that the checks are done first, and kmem_cache_cpu.page assignment only
after they pass.
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
parent 6c1dbb67
...@@ -2802,10 +2802,8 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node, ...@@ -2802,10 +2802,8 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
lockdep_assert_irqs_disabled(); lockdep_assert_irqs_disabled();
freelist = get_partial(s, gfpflags, node, &page); freelist = get_partial(s, gfpflags, node, &page);
if (freelist) { if (freelist)
c->page = page;
goto check_new_page; goto check_new_page;
}
local_irq_restore(flags); local_irq_restore(flags);
put_cpu_ptr(s->cpu_slab); put_cpu_ptr(s->cpu_slab);
...@@ -2818,9 +2816,6 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node, ...@@ -2818,9 +2816,6 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
} }
local_irq_save(flags); local_irq_save(flags);
if (c->page)
flush_slab(s, c);
/* /*
* No other reference to the page yet so we can * No other reference to the page yet so we can
* muck around with it freely without cmpxchg * muck around with it freely without cmpxchg
...@@ -2829,14 +2824,12 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node, ...@@ -2829,14 +2824,12 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
page->freelist = NULL; page->freelist = NULL;
stat(s, ALLOC_SLAB); stat(s, ALLOC_SLAB);
c->page = page;
check_new_page: check_new_page:
if (kmem_cache_debug(s)) { if (kmem_cache_debug(s)) {
if (!alloc_debug_processing(s, page, freelist, addr)) { if (!alloc_debug_processing(s, page, freelist, addr)) {
/* Slab failed checks. Next slab needed */ /* Slab failed checks. Next slab needed */
c->page = NULL;
local_irq_restore(flags); local_irq_restore(flags);
goto new_slab; goto new_slab;
} else { } else {
...@@ -2855,10 +2848,18 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node, ...@@ -2855,10 +2848,18 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
*/ */
goto return_single; goto return_single;
if (unlikely(c->page))
flush_slab(s, c);
c->page = page;
goto load_freelist; goto load_freelist;
return_single: return_single:
if (unlikely(c->page))
flush_slab(s, c);
c->page = page;
deactivate_slab(s, page, get_freepointer(s, freelist), c); deactivate_slab(s, page, get_freepointer(s, freelist), c);
local_irq_restore(flags); local_irq_restore(flags);
return freelist; return freelist;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment