Commit 39b5f29a authored by Hugh Dickins, committed by Linus Torvalds

mm: remove vma arg from page_evictable

page_evictable(page, vma) is an irritant: almost all its callers pass
NULL for vma.  Remove the vma arg and use mlocked_vma_newpage(vma, page)
explicitly in the couple of places it's needed.  But in those places we
don't even need page_evictable() itself!  They're dealing with a freshly
allocated anonymous page, which has no "mapping" and cannot be mlocked yet.
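
For illustration, a condensed sketch of the interface change (paraphrasing the hunks below, not additional patch content):

	/* Before: every caller outside the fault path passed NULL. */
	int page_evictable(struct page *page, struct vm_area_struct *vma);

	if (page_evictable(page, NULL))		/* reclaim and friends */
	if (page_evictable(page, vma))		/* fault path, new page */

	/* After: no vma argument; the fault path checks its vma directly. */
	int page_evictable(struct page *page);

	if (page_evictable(page))		/* reclaim and friends */
	if (!mlocked_vma_newpage(vma, page))	/* fault path, new page */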
Signed-off-by: Hugh Dickins <hughd@google.com>
Acked-by: Mel Gorman <mel@csn.ul.ie>
Cc: Rik van Riel <riel@redhat.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michel Lespinasse <walken@google.com>
Cc: Ying Han <yinghan@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent ec4d9f62
Documentation/vm/unevictable-lru.txt:
@@ -197,12 +197,8 @@ the pages are also "rescued" from the unevictable list in the process of
 freeing them.
 
 page_evictable() also checks for mlocked pages by testing an additional page
-flag, PG_mlocked (as wrapped by PageMlocked()).  If the page is NOT mlocked,
-and a non-NULL VMA is supplied, page_evictable() will check whether the VMA is
-VM_LOCKED via is_mlocked_vma().  is_mlocked_vma() will SetPageMlocked() and
-update the appropriate statistics if the vma is VM_LOCKED.  This method allows
-efficient "culling" of pages in the fault path that are being faulted in to
-VM_LOCKED VMAs.
+flag, PG_mlocked (as wrapped by PageMlocked()), which is set when a page is
+faulted into a VM_LOCKED vma, or found in a vma being VM_LOCKED.
 
 
 VMSCAN'S HANDLING OF UNEVICTABLE PAGES
@@ -651,7 +647,7 @@ PAGE RECLAIM IN shrink_*_list()
 -------------------------------
 
 shrink_active_list() culls any obviously unevictable pages - i.e.
-!page_evictable(page, NULL) - diverting these to the unevictable list.
+!page_evictable(page) - diverting these to the unevictable list.
 However, shrink_active_list() only sees unevictable pages that made it onto the
 active/inactive lru lists.  Note that these pages do not have PageUnevictable
 set - otherwise they would be on the unevictable list and shrink_active_list
include/linux/swap.h:
@@ -281,7 +281,7 @@ static inline int zone_reclaim(struct zone *z, gfp_t mask, unsigned int order)
 }
 #endif
 
-extern int page_evictable(struct page *page, struct vm_area_struct *vma);
+extern int page_evictable(struct page *page);
 extern void check_move_unevictable_pages(struct page **, int nr_pages);
 extern unsigned long scan_unevictable_pages;
mm/internal.h:
@@ -168,9 +168,8 @@ static inline void munlock_vma_pages_all(struct vm_area_struct *vma)
 }
 
 /*
- * Called only in fault path via page_evictable() for a new page
- * to determine if it's being mapped into a LOCKED vma.
- * If so, mark page as mlocked.
+ * Called only in fault path, to determine if a new page is being
+ * mapped into a LOCKED vma.  If it is, mark page as mlocked.
  */
 static inline int mlocked_vma_newpage(struct vm_area_struct *vma,
 					struct page *page)
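
For context, mlocked_vma_newpage() itself is untouched by this patch; in kernels of this era its body is approximately the following (an approximation of mm/internal.h, not part of the diff). It tests the vma's VM_LOCKED flag and, when set, does the SetPageMlocked() and statistics update that the old documentation paragraph attributed to is_mlocked_vma():

	static inline int mlocked_vma_newpage(struct vm_area_struct *vma,
					      struct page *page)
	{
		VM_BUG_ON(PageLRU(page));

		/* mlock is ignored for VM_SPECIAL mappings */
		if (likely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) != VM_LOCKED))
			return 0;

		/* mark the new page mlocked and account it, exactly once */
		if (!TestSetPageMlocked(page)) {
			mod_zone_page_state(page_zone(page), NR_MLOCK,
					    hpage_nr_pages(page));
			count_vm_event(UNEVICTABLE_PGMLOCKED);
		}
		return 1;
	}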
mm/ksm.c:
@@ -1586,7 +1586,7 @@ struct page *ksm_does_need_to_copy(struct page *page,
 		SetPageSwapBacked(new_page);
 		__set_page_locked(new_page);
 
-		if (page_evictable(new_page, vma))
+		if (!mlocked_vma_newpage(vma, new_page))
 			lru_cache_add_lru(new_page, LRU_ACTIVE_ANON);
 		else
 			add_page_to_unevictable_list(new_page);
mm/rmap.c:
@@ -1080,7 +1080,7 @@ void page_add_new_anon_rmap(struct page *page,
 	else
 		__inc_zone_page_state(page, NR_ANON_TRANSPARENT_HUGEPAGES);
 	__page_set_anon_rmap(page, vma, address, 1);
-	if (page_evictable(page, vma))
+	if (!mlocked_vma_newpage(vma, page))
 		lru_cache_add_lru(page, LRU_ACTIVE_ANON);
 	else
 		add_page_to_unevictable_list(page);
mm/swap.c:
@@ -751,7 +751,7 @@ void lru_add_page_tail(struct page *page, struct page *page_tail,
 	SetPageLRU(page_tail);
 
-	if (page_evictable(page_tail, NULL)) {
+	if (page_evictable(page_tail)) {
 		if (PageActive(page)) {
 			SetPageActive(page_tail);
 			active = 1;
mm/vmscan.c:
@@ -553,7 +553,7 @@ void putback_lru_page(struct page *page)
 redo:
 	ClearPageUnevictable(page);
 
-	if (page_evictable(page, NULL)) {
+	if (page_evictable(page)) {
 		/*
 		 * For evictable pages, we can use the cache.
 		 * In event of a race, worst case is we end up with an
@@ -587,7 +587,7 @@ void putback_lru_page(struct page *page)
 	 * page is on unevictable list, it never be freed. To avoid that,
 	 * check after we added it to the list, again.
 	 */
-	if (lru == LRU_UNEVICTABLE && page_evictable(page, NULL)) {
+	if (lru == LRU_UNEVICTABLE && page_evictable(page)) {
 		if (!isolate_lru_page(page)) {
 			put_page(page);
 			goto redo;
@@ -709,7 +709,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 		sc->nr_scanned++;
 
-		if (unlikely(!page_evictable(page, NULL)))
+		if (unlikely(!page_evictable(page)))
 			goto cull_mlocked;
 
 		if (!sc->may_unmap && page_mapped(page))
@@ -1217,7 +1217,7 @@ putback_inactive_pages(struct lruvec *lruvec, struct list_head *page_list)
 		VM_BUG_ON(PageLRU(page));
 		list_del(&page->lru);
-		if (unlikely(!page_evictable(page, NULL))) {
+		if (unlikely(!page_evictable(page))) {
 			spin_unlock_irq(&zone->lru_lock);
 			putback_lru_page(page);
 			spin_lock_irq(&zone->lru_lock);
@@ -1470,7 +1470,7 @@ static void shrink_active_list(unsigned long nr_to_scan,
 		page = lru_to_page(&l_hold);
 		list_del(&page->lru);
 
-		if (unlikely(!page_evictable(page, NULL))) {
+		if (unlikely(!page_evictable(page))) {
 			putback_lru_page(page);
 			continue;
 		}
@@ -3414,27 +3414,18 @@ int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
 
 /*
  * page_evictable - test whether a page is evictable
  * @page: the page to test
- * @vma: the VMA in which the page is or will be mapped, may be NULL
  *
  * Test whether page is evictable--i.e., should be placed on active/inactive
- * lists vs unevictable list.  The vma argument is !NULL when called from the
- * fault path to determine how to instantiate a new page.
+ * lists vs unevictable list.
  *
  * Reasons page might not be evictable:
  * (1) page's mapping marked unevictable
  * (2) page is part of an mlocked VMA
  *
  */
-int page_evictable(struct page *page, struct vm_area_struct *vma)
+int page_evictable(struct page *page)
 {
-	if (mapping_unevictable(page_mapping(page)))
-		return 0;
-
-	if (PageMlocked(page) || (vma && mlocked_vma_newpage(vma, page)))
-		return 0;
-
-	return 1;
+	return !mapping_unevictable(page_mapping(page)) && !PageMlocked(page);
 }
 
 #ifdef CONFIG_SHMEM
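
Putting the two halves together (a summary sketch drawn from the hunks above, not extra patch content): the fault path now decides evictability up front, and reclaim consults only the page itself:

	/* Fault path (mm/rmap.c and mm/ksm.c hunks): sets PG_mlocked if
	 * the new page's vma is VM_LOCKED, and files the page accordingly. */
	if (!mlocked_vma_newpage(vma, page))
		lru_cache_add_lru(page, LRU_ACTIVE_ANON);
	else
		add_page_to_unevictable_list(page);

	/* Reclaim (mm/vmscan.c hunks): no vma is available or needed;
	 * PG_mlocked plus the mapping's flag carry all the information. */
	if (unlikely(!page_evictable(page)))
		goto cull_mlocked;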
@@ -3472,7 +3463,7 @@ void check_move_unevictable_pages(struct page **pages, int nr_pages)
 		if (!PageLRU(page) || !PageUnevictable(page))
 			continue;
 
-		if (page_evictable(page, NULL)) {
+		if (page_evictable(page)) {
 			enum lru_list lru = page_lru_base_type(page);
 
 			VM_BUG_ON(PageActive(page));