Commit 7cf9f3ba authored by Vlastimil Babka

mm, slub: only disable irq with spin_lock in __unfreeze_partials()

__unfreeze_partials() no longer needs to have irqs disabled, except for making
the spin_lock operations irq-safe, so convert the spin_lock operations and
remove the separate irq handling.
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
parent fc1455f4
...@@ -2352,9 +2352,7 @@ static void __unfreeze_partials(struct kmem_cache *s, struct page *partial_page) ...@@ -2352,9 +2352,7 @@ static void __unfreeze_partials(struct kmem_cache *s, struct page *partial_page)
{ {
struct kmem_cache_node *n = NULL, *n2 = NULL; struct kmem_cache_node *n = NULL, *n2 = NULL;
struct page *page, *discard_page = NULL; struct page *page, *discard_page = NULL;
unsigned long flags; unsigned long flags = 0;
local_irq_save(flags);
while (partial_page) { while (partial_page) {
struct page new; struct page new;
...@@ -2366,10 +2364,10 @@ static void __unfreeze_partials(struct kmem_cache *s, struct page *partial_page) ...@@ -2366,10 +2364,10 @@ static void __unfreeze_partials(struct kmem_cache *s, struct page *partial_page)
n2 = get_node(s, page_to_nid(page)); n2 = get_node(s, page_to_nid(page));
if (n != n2) { if (n != n2) {
if (n) if (n)
spin_unlock(&n->list_lock); spin_unlock_irqrestore(&n->list_lock, flags);
n = n2; n = n2;
spin_lock(&n->list_lock); spin_lock_irqsave(&n->list_lock, flags);
} }
do { do {
...@@ -2398,9 +2396,7 @@ static void __unfreeze_partials(struct kmem_cache *s, struct page *partial_page) ...@@ -2398,9 +2396,7 @@ static void __unfreeze_partials(struct kmem_cache *s, struct page *partial_page)
} }
if (n) if (n)
spin_unlock(&n->list_lock); spin_unlock_irqrestore(&n->list_lock, flags);
local_irq_restore(flags);
while (discard_page) { while (discard_page) {
page = discard_page; page = discard_page;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment