Commit aae466b0 authored by Joonsoo Kim, committed by Linus Torvalds

mm/swap: implement workingset detection for anonymous LRU

This patch implements workingset detection for the anonymous LRU.  All the
infrastructure was implemented by the previous patches in the series, so this
patch just activates workingset detection by installing/retrieving the shadow
entry and adding the refault calculation.

Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Hugh Dickins <hughd@google.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Minchan Kim <minchan@kernel.org>
Link: http://lkml.kernel.org/r/1595490560-15117-6-git-send-email-iamjoonsoo.kim@lge.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 3852f676
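Taken together, the hunks below wire up one lifecycle: on reclaim, __remove_mapping() asks workingset_eviction() for a shadow entry and stores it in the evicted page's swap cache slot; on swap-in, do_swap_page() and __read_swap_cache_async() retrieve that shadow and hand it to workingset_refault(), which turns it into a refault distance. The following stand-alone sketch only models that store/retrieve cycle in user space; shadow_store(), shadow_lookup() and the array-based table are invented for illustration and are not kernel APIs (the kernel keeps the shadow as an XArray value entry in the swap cache).

#include <stdio.h>

#define NR_SLOTS 16

/* 0 means "no shadow", playing the role of a NULL shadow entry. */
static unsigned long shadow_table[NR_SLOTS];
/* Monotonic eviction counter, standing in for the LRU eviction clock. */
static unsigned long eviction_clock;

/* Eviction side: remember when the page left memory (cf. workingset_eviction()). */
static void shadow_store(unsigned int slot)
{
	shadow_table[slot] = ++eviction_clock;
}

/* Swap-in side: fetch and clear the shadow (cf. get_shadow_from_swap_cache()). */
static unsigned long shadow_lookup(unsigned int slot)
{
	unsigned long shadow = shadow_table[slot];

	shadow_table[slot] = 0;
	return shadow;
}

int main(void)
{
	unsigned int slot = 3;
	unsigned long shadow;

	shadow_store(slot);	/* anonymous page evicted to swap slot 3 */
	eviction_clock += 5;	/* five more evictions happen in the meantime */

	shadow = shadow_lookup(slot);
	if (shadow) {
		/* Refault distance: evictions that happened while the page was out. */
		unsigned long refault_distance = eviction_clock - shadow;

		printf("refault distance: %lu\n", refault_distance);
	}
	return 0;
}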
include/linux/swap.h

@@ -414,6 +414,7 @@ extern struct address_space *swapper_spaces[];
 extern unsigned long total_swapcache_pages(void);
 extern void show_swap_cache_info(void);
 extern int add_to_swap(struct page *page);
+extern void *get_shadow_from_swap_cache(swp_entry_t entry);
 extern int add_to_swap_cache(struct page *page, swp_entry_t entry,
 			gfp_t gfp, void **shadowp);
 extern void __delete_from_swap_cache(struct page *page,

@@ -573,6 +574,11 @@ static inline int add_to_swap(struct page *page)
 	return 0;
 }
 
+static inline void *get_shadow_from_swap_cache(swp_entry_t entry)
+{
+	return NULL;
+}
+
 static inline int add_to_swap_cache(struct page *page, swp_entry_t entry,
 					gfp_t gfp_mask, void **shadowp)
 {
mm/memory.c

@@ -3098,6 +3098,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
 	int locked;
 	int exclusive = 0;
 	vm_fault_t ret = 0;
+	void *shadow = NULL;
 
 	if (!pte_unmap_same(vma->vm_mm, vmf->pmd, vmf->pte, vmf->orig_pte))
 		goto out;

@@ -3149,13 +3150,9 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
 					goto out_page;
 				}
 
-				/*
-				 * XXX: Move to lru_cache_add() when it
-				 * supports new vs putback
-				 */
-				spin_lock_irq(&page_pgdat(page)->lru_lock);
-				lru_note_cost_page(page);
-				spin_unlock_irq(&page_pgdat(page)->lru_lock);
+				shadow = get_shadow_from_swap_cache(entry);
+				if (shadow)
+					workingset_refault(page, shadow);
 
 				lru_cache_add(page);
 				swap_readpage(page, true);
mm/swap_state.c

@@ -106,6 +106,20 @@ void show_swap_cache_info(void)
 	printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10));
 }
 
+void *get_shadow_from_swap_cache(swp_entry_t entry)
+{
+	struct address_space *address_space = swap_address_space(entry);
+	pgoff_t idx = swp_offset(entry);
+	struct page *page;
+
+	page = find_get_entry(address_space, idx);
+	if (xa_is_value(page))
+		return page;
+	if (page)
+		put_page(page);
+	return NULL;
+}
+
 /*
  * add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
  * but sets SwapCache flag and private instead of mapping and index.

@@ -406,6 +420,7 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 {
 	struct swap_info_struct *si;
 	struct page *page;
+	void *shadow = NULL;
 
 	*new_page_allocated = false;

@@ -474,7 +489,7 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 	__SetPageSwapBacked(page);
 
 	/* May fail (-ENOMEM) if XArray node allocation failed. */
-	if (add_to_swap_cache(page, entry, gfp_mask & GFP_RECLAIM_MASK, NULL)) {
+	if (add_to_swap_cache(page, entry, gfp_mask & GFP_RECLAIM_MASK, &shadow)) {
 		put_swap_page(page, entry);
 		goto fail_unlock;
 	}

@@ -484,10 +499,8 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 		goto fail_unlock;
 	}
 
-	/* XXX: Move to lru_cache_add() when it supports new vs putback */
-	spin_lock_irq(&page_pgdat(page)->lru_lock);
-	lru_note_cost_page(page);
-	spin_unlock_irq(&page_pgdat(page)->lru_lock);
+	if (shadow)
+		workingset_refault(page, shadow);
 
 	/* Caller will initiate read into locked page */
 	SetPageWorkingset(page);
mm/vmscan.c

@@ -854,6 +854,7 @@ static int __remove_mapping(struct address_space *mapping, struct page *page,
 {
 	unsigned long flags;
 	int refcount;
+	void *shadow = NULL;
 
 	BUG_ON(!PageLocked(page));
 	BUG_ON(mapping != page_mapping(page));

@@ -896,13 +897,13 @@ static int __remove_mapping(struct address_space *mapping, struct page *page,
 	if (PageSwapCache(page)) {
 		swp_entry_t swap = { .val = page_private(page) };
 		mem_cgroup_swapout(page, swap);
-		__delete_from_swap_cache(page, swap, NULL);
+		if (reclaimed && !mapping_exiting(mapping))
+			shadow = workingset_eviction(page, target_memcg);
+		__delete_from_swap_cache(page, swap, shadow);
 		xa_unlock_irqrestore(&mapping->i_pages, flags);
 		put_swap_page(page, swap);
-		workingset_eviction(page, target_memcg);
 	} else {
 		void (*freepage)(struct page *);
-		void *shadow = NULL;
 
 		freepage = mapping->a_ops->freepage;
 		/*
mm/workingset.c

@@ -353,15 +353,22 @@ void workingset_refault(struct page *page, void *shadow)
 	/*
 	 * Compare the distance to the existing workingset size. We
 	 * don't activate pages that couldn't stay resident even if
-	 * all the memory was available to the page cache. Whether
-	 * cache can compete with anon or not depends on having swap.
+	 * all the memory was available to the workingset. Whether
+	 * workingset competition needs to consider anon or not depends
+	 * on having swap.
 	 */
 	workingset_size = lruvec_page_state(eviction_lruvec, NR_ACTIVE_FILE);
-	if (mem_cgroup_get_nr_swap_pages(memcg) > 0) {
+	if (!file) {
 		workingset_size += lruvec_page_state(eviction_lruvec,
-						     NR_INACTIVE_ANON);
+						     NR_INACTIVE_FILE);
+	}
+	if (mem_cgroup_get_nr_swap_pages(memcg) > 0) {
 		workingset_size += lruvec_page_state(eviction_lruvec,
 						     NR_ACTIVE_ANON);
+		if (file) {
+			workingset_size += lruvec_page_state(eviction_lruvec,
+						     NR_INACTIVE_ANON);
+		}
 	}
 	if (refault_distance > workingset_size)
 		goto out;
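The comparison above decides whether a refaulting page is activated: it is, when its refault distance does not exceed the size of the workingset it competes with. After this hunk, a file refault also counts the anon lists only when swap is available, while an anon refault always counts both file lists. A rough stand-alone model of that decision follows; should_activate() and its parameters are invented for the example, with the counters standing in for the lruvec_page_state(..., NR_*) values.

#include <stdbool.h>
#include <stdio.h>

/* Rough user-space model of the workingset_size comparison after this
 * patch; illustrative only, not kernel code. */
static bool should_activate(unsigned long refault_distance, bool file,
			    bool have_swap,
			    unsigned long active_file, unsigned long inactive_file,
			    unsigned long active_anon, unsigned long inactive_anon)
{
	unsigned long workingset_size = active_file;

	if (!file)
		workingset_size += inactive_file;
	if (have_swap) {
		workingset_size += active_anon;
		if (file)
			workingset_size += inactive_anon;
	}
	/* The kernel skips activation when distance > workingset_size. */
	return refault_distance <= workingset_size;
}

int main(void)
{
	/* Anon refault, distance 150, swap available:
	 * workingset_size = 100 (active file) + 80 (inactive file)
	 *                 + 40 (active anon) = 220, so it activates. */
	printf("activate: %d\n",
	       should_activate(150, /*file=*/false, /*have_swap=*/true,
			       100, 80, 40, 60));
	return 0;
}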