Commit a019d201 authored by Vlastimil Babka

mm, slub: move reset of c->page and freelist out of deactivate_slab()

deactivate_slab() removes the cpu slab by merging the cpu freelist with slab's
freelist and putting the slab on the proper node's list. It also sets the
respective kmem_cache_cpu pointers to NULL.

By extracting the kmem_cache_cpu operations from the function, we can make it
not dependent on disabled irqs.

Also, if we return a single free pointer from ___slab_alloc, we no longer have
to assign kmem_cache_cpu.page before deactivation, or care whether somebody
preempted us and assigned a different page to our kmem_cache_cpu in the process.
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
parent 4b1f449d
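
The heart of the change is an ordering convention, visible in the diff below: callers snapshot and clear the kmem_cache_cpu fields first, and deactivate_slab() then works purely on its arguments. The following minimal, standalone C sketch models that convention; page_stub, kmem_cache_cpu_model and deactivate_slab_model() are simplified stand-ins, not the kernel's definitions.

#include <stdio.h>

/* Illustrative stand-ins for the kernel types. */
struct page_stub { int id; };

struct kmem_cache_cpu_model {
        void *freelist;          /* per-cpu lockless freelist */
        struct page_stub *page;  /* the cpu slab */
        unsigned long tid;
};

/*
 * Models deactivate_slab() after the patch: it receives the already
 * detached page and freelist and never touches the kmem_cache_cpu.
 */
static void deactivate_slab_model(struct page_stub *page, void *freelist)
{
        printf("deactivate: page %d, freelist %p\n", page->id, freelist);
}

/* Models the new flush_slab(): detach from c first, deactivate second. */
static void flush_slab_model(struct kmem_cache_cpu_model *c)
{
        void *freelist = c->freelist;
        struct page_stub *page = c->page;

        c->page = NULL;          /* c stops referring to the slab... */
        c->freelist = NULL;
        c->tid++;                /* ...before the deactivation work runs */

        deactivate_slab_model(page, freelist);
}

int main(void)
{
        struct page_stub slab = { .id = 1 };
        int object;
        struct kmem_cache_cpu_model c = { .freelist = &object, .page = &slab };

        flush_slab_model(&c);
        return 0;
}

Because deactivate_slab() no longer reads or writes the per-cpu structure, only the short detach step has to stay atomic with respect to the local cpu; that is what lets the heavier freelist merging become independent of disabled irqs, as the message above notes.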
@@ -2209,10 +2209,13 @@ static void init_kmem_cache_cpus(struct kmem_cache *s)
 }
 
 /*
- * Remove the cpu slab
+ * Finishes removing the cpu slab. Merges cpu's freelist with page's freelist,
+ * unfreezes the slabs and puts it on the proper list.
+ * Assumes the slab has been already safely taken away from kmem_cache_cpu
+ * by the caller.
  */
 static void deactivate_slab(struct kmem_cache *s, struct page *page,
-                            void *freelist, struct kmem_cache_cpu *c)
+                            void *freelist)
 {
         enum slab_modes { M_NONE, M_PARTIAL, M_FULL, M_FREE };
         struct kmem_cache_node *n = get_node(s, page_to_nid(page));
@@ -2341,9 +2344,6 @@ static void deactivate_slab(struct kmem_cache *s, struct page *page,
                 discard_slab(s, page);
                 stat(s, FREE_SLAB);
         }
-
-        c->page = NULL;
-        c->freelist = NULL;
 }
 
 /*
@@ -2468,10 +2468,16 @@ static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
 
 static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
 {
-        stat(s, CPUSLAB_FLUSH);
-        deactivate_slab(s, c->page, c->freelist, c);
+        void *freelist = c->freelist;
+        struct page *page = c->page;
 
+        c->page = NULL;
+        c->freelist = NULL;
         c->tid = next_tid(c->tid);
+
+        deactivate_slab(s, page, freelist);
+
+        stat(s, CPUSLAB_FLUSH);
 }
 
 /*
@@ -2769,7 +2775,10 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
                 local_irq_restore(flags);
                 goto reread_page;
         }
-        deactivate_slab(s, page, c->freelist, c);
+        freelist = c->freelist;
+        c->page = NULL;
+        c->freelist = NULL;
+        deactivate_slab(s, page, freelist);
         local_irq_restore(flags);
 
 new_slab:
@@ -2848,11 +2857,7 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 return_single:
 
         local_irq_save(flags);
-        if (unlikely(c->page))
-                flush_slab(s, c);
-        c->page = page;
-
-        deactivate_slab(s, page, get_freepointer(s, freelist), c);
+        deactivate_slab(s, page, get_freepointer(s, freelist));
         local_irq_restore(flags);
         return freelist;
 }
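
To make the return_single reasoning in the message above concrete, here is a hedged before/after contrast extending the sketch near the top (same stand-in types; the function names are illustrative, not kernel code):

/*
 * Old flow, modeled: the fresh page was parked in c->page purely so
 * deactivate_slab() could clear it again, and any page a preempting
 * task had meanwhile assigned to c->page had to be flushed first.
 */
static void *return_single_old(struct kmem_cache_cpu_model *c,
                               struct page_stub *page, void *freelist)
{
        if (c->page) {
                /* kernel: flush_slab(s, c) */
        }
        c->page = page;
        deactivate_slab_model(page, freelist);
        c->page = NULL;          /* pre-patch deactivate_slab() reset these */
        c->freelist = NULL;
        return freelist;
}

/*
 * New flow, modeled: c is never written, so whatever a preempting task
 * did to c->page is irrelevant on this path.
 */
static void *return_single_new(struct page_stub *page, void *freelist)
{
        deactivate_slab_model(page, freelist);
        return freelist;
}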