Commit c2f973ba authored by Vlastimil Babka

mm, slub: detach whole partial list at once in unfreeze_partials()

Instead of iterating through the live percpu partial list, detach it from the
kmem_cache_cpu at once. This is simpler and will allow further optimization.
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
parent 8de06a6f
@@ -2358,16 +2358,20 @@ static void unfreeze_partials(struct kmem_cache *s,
 {
 #ifdef CONFIG_SLUB_CPU_PARTIAL
 	struct kmem_cache_node *n = NULL, *n2 = NULL;
-	struct page *page, *discard_page = NULL;
+	struct page *page, *partial_page, *discard_page = NULL;
 	unsigned long flags;
 
 	local_irq_save(flags);
-	while ((page = slub_percpu_partial(c))) {
+	partial_page = slub_percpu_partial(c);
+	c->partial = NULL;
+
+	while (partial_page) {
 		struct page new;
 		struct page old;
 
-		slub_set_percpu_partial(c, page);
+		page = partial_page;
+		partial_page = page->next;
 
 		n2 = get_node(s, page_to_nid(page));
 		if (n != n2) {
...
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment