Commit f45840b5 authored by Nick Piggin, committed by Linus Torvalds

mm: pagecache insertion fewer atomics

Setting and clearing a page's locked bit when inserting it into the swapcache or
pagecache, while the page has no other references, can use non-atomic page-flags
operations, because no other CPU can be operating on it at that time.

This saves one atomic operation when inserting a page into pagecache.
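
As an illustration of the distinction, here is a minimal userspace sketch using
C11 atomics; the function names are hypothetical stand-ins for the kernel's
set_bit()/__set_bit() primitives from <asm/bitops.h>:

/*
 * Minimal userspace sketch of atomic vs. non-atomic bit setting.
 * Hypothetical names; the kernel's real primitives are set_bit()
 * and __set_bit().
 */
#include <stdatomic.h>
#include <stdio.h>

#define PG_locked 0

/* Atomic: safe even if other CPUs touch the flags word concurrently;
 * on x86 this becomes a lock-prefixed read-modify-write. */
static void set_page_locked_atomic(_Atomic unsigned long *flags)
{
        atomic_fetch_or(flags, 1UL << PG_locked);
}

/* Non-atomic: a plain load/or/store; only safe while no other CPU
 * can hold a reference to the page, as with a freshly allocated
 * page in add_to_page_cache(). */
static void set_page_locked_nonatomic(unsigned long *flags)
{
        *flags |= 1UL << PG_locked;
}

int main(void)
{
        _Atomic unsigned long atomic_flags = 0;
        unsigned long plain_flags = 0;

        set_page_locked_atomic(&atomic_flags);
        set_page_locked_nonatomic(&plain_flags);

        printf("atomic: %#lx  non-atomic: %#lx\n",
               atomic_load(&atomic_flags), plain_flags);
        return 0;
}

On x86 the atomic variant compiles to a lock-prefixed read-modify-write; that is
the instruction this commit elides on the insertion fast path.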
Signed-off-by: Nick Piggin <npiggin@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 9978ad58
include/linux/pagemap.h
@@ -299,14 +299,14 @@ extern int __lock_page_killable(struct page *page);
 extern void __lock_page_nosync(struct page *page);
 extern void unlock_page(struct page *page);
 
-static inline void set_page_locked(struct page *page)
+static inline void __set_page_locked(struct page *page)
 {
-        set_bit(PG_locked, &page->flags);
+        __set_bit(PG_locked, &page->flags);
 }
 
-static inline void clear_page_locked(struct page *page)
+static inline void __clear_page_locked(struct page *page)
 {
-        clear_bit(PG_locked, &page->flags);
+        __clear_bit(PG_locked, &page->flags);
 }
 
 static inline int trylock_page(struct page *page)
@@ -438,17 +438,17 @@ extern void __remove_from_page_cache(struct page *page);
 
 /*
  * Like add_to_page_cache_locked, but used to add newly allocated pages:
- * the page is new, so we can just run set_page_locked() against it.
+ * the page is new, so we can just run __set_page_locked() against it.
  */
 static inline int add_to_page_cache(struct page *page,
                 struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
 {
         int error;
 
-        set_page_locked(page);
+        __set_page_locked(page);
         error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
         if (unlikely(error))
-                clear_page_locked(page);
+                __clear_page_locked(page);
         return error;
 }
mm/swap_state.c
@@ -303,7 +303,7 @@ struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
                  * re-using the just freed swap entry for an existing page.
                  * May fail (-ENOMEM) if radix-tree node allocation failed.
                  */
-                set_page_locked(new_page);
+                __set_page_locked(new_page);
                 SetPageSwapBacked(new_page);
                 err = add_to_swap_cache(new_page, entry, gfp_mask & GFP_KERNEL);
                 if (likely(!err)) {
@@ -315,7 +315,7 @@ struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
                         return new_page;
                 }
                 ClearPageSwapBacked(new_page);
-                clear_page_locked(new_page);
+                __clear_page_locked(new_page);
                 swap_free(entry);
         } while (err != -ENOMEM);