Commit 30472509 authored by Johannes Weiner, committed by Linus Torvalds

mm: remove irqsave/restore locking from contexts with irqs enabled

The page cache deletion paths all have interrupts enabled, so no need to
use the irqsave/irqrestore locking variants.

They used to have irqs disabled by the memcg lock added in commit
c4843a75 ("memcg: add per cgroup dirty page accounting"), but that has
since been replaced by memcg taking the page lock instead, commit
0a31bc97 ("mm: memcontrol: rewrite uncharge API").

Link: https://lkml.kernel.org/r/20210614211904.14420-1-hannes@cmpxchg.org
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 20792ebf
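For reference, every hunk below follows the same pattern. The irqsave/irqrestore variants save the caller's interrupt state before disabling interrupts and restore it on unlock, which is only necessary when the caller may already have interrupts disabled; when interrupts are known to be enabled, the plain _irq variants suffice and the flags local can be dropped. A minimal sketch of the two patterns (hypothetical helper names, for illustration only, not part of this patch):

#include <linux/xarray.h>

/* Hypothetical helpers for illustration; not part of this patch. */

/*
 * Variant for callers that may already have interrupts disabled:
 * the current interrupt state is saved into @flags and restored
 * on unlock, so a caller running with irqs off stays that way.
 */
static void erase_entry_any_context(struct xarray *xa, unsigned long index)
{
	unsigned long flags;

	xa_lock_irqsave(xa, flags);
	__xa_erase(xa, index);
	xa_unlock_irqrestore(xa, flags);
}

/*
 * Variant for callers known to run with interrupts enabled:
 * interrupts are disabled unconditionally on lock and re-enabled
 * unconditionally on unlock, with no state to carry around.
 */
static void erase_entry_irqs_on(struct xarray *xa, unsigned long index)
{
	xa_lock_irq(xa);
	__xa_erase(xa, index);
	xa_unlock_irq(xa);
}

Since the deletion paths touched here all run with interrupts enabled, each xa_lock_irqsave/xa_unlock_irqrestore (and xas_lock_irqsave/xas_unlock_irqrestore) pair can become the second form, and the now-unused flags variables go away.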
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -258,12 +258,11 @@ static void page_cache_free_page(struct address_space *mapping,
 void delete_from_page_cache(struct page *page)
 {
 	struct address_space *mapping = page_mapping(page);
-	unsigned long flags;
 
 	BUG_ON(!PageLocked(page));
-	xa_lock_irqsave(&mapping->i_pages, flags);
+	xa_lock_irq(&mapping->i_pages);
 	__delete_from_page_cache(page, NULL);
-	xa_unlock_irqrestore(&mapping->i_pages, flags);
+	xa_unlock_irq(&mapping->i_pages);
 
 	page_cache_free_page(mapping, page);
 }
@@ -335,19 +334,18 @@ void delete_from_page_cache_batch(struct address_space *mapping,
 				  struct pagevec *pvec)
 {
 	int i;
-	unsigned long flags;
 
 	if (!pagevec_count(pvec))
 		return;
 
-	xa_lock_irqsave(&mapping->i_pages, flags);
+	xa_lock_irq(&mapping->i_pages);
 	for (i = 0; i < pagevec_count(pvec); i++) {
 		trace_mm_filemap_delete_from_page_cache(pvec->pages[i]);
 
 		unaccount_page_cache_page(mapping, pvec->pages[i]);
 	}
 	page_cache_delete_batch(mapping, pvec);
-	xa_unlock_irqrestore(&mapping->i_pages, flags);
+	xa_unlock_irq(&mapping->i_pages);
 
 	for (i = 0; i < pagevec_count(pvec); i++)
 		page_cache_free_page(mapping, pvec->pages[i]);
@@ -821,7 +819,6 @@ void replace_page_cache_page(struct page *old, struct page *new)
 	void (*freepage)(struct page *) = mapping->a_ops->freepage;
 	pgoff_t offset = old->index;
 	XA_STATE(xas, &mapping->i_pages, offset);
-	unsigned long flags;
 
 	VM_BUG_ON_PAGE(!PageLocked(old), old);
 	VM_BUG_ON_PAGE(!PageLocked(new), new);
@@ -833,7 +830,7 @@ void replace_page_cache_page(struct page *old, struct page *new)
 
 	mem_cgroup_migrate(old, new);
 
-	xas_lock_irqsave(&xas, flags);
+	xas_lock_irq(&xas);
 	xas_store(&xas, new);
 
 	old->mapping = NULL;
@@ -846,7 +843,7 @@ void replace_page_cache_page(struct page *old, struct page *new)
 		__dec_lruvec_page_state(old, NR_SHMEM);
 	if (PageSwapBacked(new))
 		__inc_lruvec_page_state(new, NR_SHMEM);
-	xas_unlock_irqrestore(&xas, flags);
+	xas_unlock_irq(&xas);
 	if (freepage)
 		freepage(old);
 	put_page(old);
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -560,21 +560,19 @@ void invalidate_mapping_pagevec(struct address_space *mapping,
 static int
 invalidate_complete_page2(struct address_space *mapping, struct page *page)
 {
-	unsigned long flags;
-
 	if (page->mapping != mapping)
 		return 0;
 
 	if (page_has_private(page) && !try_to_release_page(page, GFP_KERNEL))
 		return 0;
 
-	xa_lock_irqsave(&mapping->i_pages, flags);
+	xa_lock_irq(&mapping->i_pages);
 	if (PageDirty(page))
 		goto failed;
 
 	BUG_ON(page_has_private(page));
 	__delete_from_page_cache(page, NULL);
-	xa_unlock_irqrestore(&mapping->i_pages, flags);
+	xa_unlock_irq(&mapping->i_pages);
 
 	if (mapping->a_ops->freepage)
 		mapping->a_ops->freepage(page);
@@ -582,7 +580,7 @@ invalidate_complete_page2(struct address_space *mapping, struct page *page)
 
 	put_page(page);	/* pagecache ref */
 	return 1;
 failed:
-	xa_unlock_irqrestore(&mapping->i_pages, flags);
+	xa_unlock_irq(&mapping->i_pages);
 	return 0;
 }
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1052,14 +1052,13 @@ static pageout_t pageout(struct page *page, struct address_space *mapping)
 static int __remove_mapping(struct address_space *mapping, struct page *page,
 			    bool reclaimed, struct mem_cgroup *target_memcg)
 {
-	unsigned long flags;
 	int refcount;
 	void *shadow = NULL;
 
 	BUG_ON(!PageLocked(page));
 	BUG_ON(mapping != page_mapping(page));
 
-	xa_lock_irqsave(&mapping->i_pages, flags);
+	xa_lock_irq(&mapping->i_pages);
 	/*
 	 * The non racy check for a busy page.
 	 *
@@ -1100,7 +1099,7 @@ static int __remove_mapping(struct address_space *mapping, struct page *page,
 		if (reclaimed && !mapping_exiting(mapping))
 			shadow = workingset_eviction(page, target_memcg);
 		__delete_from_swap_cache(page, swap, shadow);
-		xa_unlock_irqrestore(&mapping->i_pages, flags);
+		xa_unlock_irq(&mapping->i_pages);
 		put_swap_page(page, swap);
 	} else {
 		void (*freepage)(struct page *);
@@ -1126,7 +1125,7 @@ static int __remove_mapping(struct address_space *mapping, struct page *page,
 		    !mapping_exiting(mapping) && !dax_mapping(mapping))
 			shadow = workingset_eviction(page, target_memcg);
 		__delete_from_page_cache(page, shadow);
-		xa_unlock_irqrestore(&mapping->i_pages, flags);
+		xa_unlock_irq(&mapping->i_pages);
 
 		if (freepage != NULL)
 			freepage(page);
@@ -1135,7 +1134,7 @@ static int __remove_mapping(struct address_space *mapping, struct page *page,
 
 	return 1;
 
 cannot_free:
-	xa_unlock_irqrestore(&mapping->i_pages, flags);
+	xa_unlock_irq(&mapping->i_pages);
 	return 0;
 }