Commit 2d4894b5 authored by Mel Gorman, committed by Linus Torvalds

mm: remove cold parameter from free_hot_cold_page*

Most callers of free_hot_cold_page claim the pages being released
are cache hot.  The exception is the page reclaim paths where it is
likely that enough pages will be freed in the near future that the
per-cpu lists are going to be recycled and the cache hotness information
is lost.  As no one really cares about the hotness of pages being
released to the allocator, just ditch the parameter.

The APIs are renamed to indicate that it's no longer about hot/cold
pages.  It should also be less confusing as there are subtle differences
between them.  __free_pages drops a reference and frees a page when the
refcount reaches zero.  free_hot_cold_page handled pages whose refcount
was already zero which is non-obvious from the name.  free_unref_page
should be more obvious.
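
To illustrate the distinction (a hypothetical order-0 caller, not part of
this patch): __free_pages drops a reference itself, while free_unref_page
expects the caller to have already dropped the last reference.

	/* __free_pages() drops a reference and frees once the refcount hits zero. */
	__free_pages(page, 0);

	/*
	 * free_unref_page() takes a page whose refcount is already zero,
	 * so the caller drops the final reference first.
	 */
	if (put_page_testzero(page))
		free_unref_page(page);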

No performance impact is expected as the overhead is marginal.  The
parameter is removed simply because it is a bit stupid to have a useless
parameter copied everywhere.

[mgorman@techsingularity.net: add pages to head, not tail]
  Link: http://lkml.kernel.org/r/20171019154321.qtpzaeftoyyw4iey@techsingularity.net
Link: http://lkml.kernel.org/r/20171018075952.10627-8-mgorman@techsingularity.net
Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Dave Chinner <david@fromorbit.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent c6f92f9f
@@ -200,7 +200,7 @@ static void destroy_pagetable_page(struct mm_struct *mm)
 	/* We allow PTE_FRAG_NR fragments from a PTE page */
 	if (page_ref_sub_and_test(page, PTE_FRAG_NR - count)) {
 		pgtable_page_dtor(page);
-		free_hot_cold_page(page, 0);
+		free_unref_page(page);
 	}
 }
...
@@ -404,7 +404,7 @@ void pte_fragment_free(unsigned long *table, int kernel)
 	if (put_page_testzero(page)) {
 		if (!kernel)
 			pgtable_page_dtor(page);
-		free_hot_cold_page(page, 0);
+		free_unref_page(page);
 	}
 }
...
@@ -2939,7 +2939,7 @@ pgtable_t pte_alloc_one(struct mm_struct *mm,
 	if (!page)
 		return NULL;
 	if (!pgtable_page_ctor(page)) {
-		free_hot_cold_page(page, 0);
+		free_unref_page(page);
 		return NULL;
 	}
 	return (pte_t *) page_address(page);
...
@@ -409,7 +409,7 @@ void __homecache_free_pages(struct page *page, unsigned int order)
 	if (put_page_testzero(page)) {
 		homecache_change_page_home(page, order, PAGE_HOME_HASH);
 		if (order == 0) {
-			free_hot_cold_page(page, false);
+			free_unref_page(page);
 		} else {
 			init_page_count(page);
 			__free_pages(page, order);
...
@@ -530,8 +530,8 @@ void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask);
 extern void __free_pages(struct page *page, unsigned int order);
 extern void free_pages(unsigned long addr, unsigned int order);
-extern void free_hot_cold_page(struct page *page, bool cold);
-extern void free_hot_cold_page_list(struct list_head *list, bool cold);
+extern void free_unref_page(struct page *page);
+extern void free_unref_page_list(struct list_head *list);

 struct page_frag_cache;
 extern void __page_frag_cache_drain(struct page *page, unsigned int count);
...
@@ -172,24 +172,21 @@ TRACE_EVENT(mm_page_free,

 TRACE_EVENT(mm_page_free_batched,

-	TP_PROTO(struct page *page, int cold),
+	TP_PROTO(struct page *page),

-	TP_ARGS(page, cold),
+	TP_ARGS(page),

 	TP_STRUCT__entry(
 		__field( unsigned long, pfn )
-		__field( int, cold )
 	),

 	TP_fast_assign(
 		__entry->pfn = page_to_pfn(page);
-		__entry->cold = cold;
 	),

-	TP_printk("page=%p pfn=%lu order=0 cold=%d",
+	TP_printk("page=%p pfn=%lu order=0",
 			pfn_to_page(__entry->pfn),
-			__entry->pfn,
-			__entry->cold)
+			__entry->pfn)
 );

 TRACE_EVENT(mm_page_alloc,
...
@@ -2611,7 +2611,7 @@ void mark_free_pages(struct zone *zone)
 }
 #endif /* CONFIG_PM */

-static bool free_hot_cold_page_prepare(struct page *page, unsigned long pfn)
+static bool free_unref_page_prepare(struct page *page, unsigned long pfn)
 {
 	int migratetype;
...
@@ -2623,8 +2623,7 @@ static bool free_hot_cold_page_prepare(struct page *page, unsigned long pfn)
 	return true;
 }

-static void free_hot_cold_page_commit(struct page *page, unsigned long pfn,
-				bool cold)
+static void free_unref_page_commit(struct page *page, unsigned long pfn)
 {
 	struct zone *zone = page_zone(page);
 	struct per_cpu_pages *pcp;
...
@@ -2649,10 +2648,7 @@ static void free_hot_cold_page_commit(struct page *page, unsigned long pfn,
 	}

 	pcp = &this_cpu_ptr(zone->pageset)->pcp;
-	if (!cold)
-		list_add(&page->lru, &pcp->lists[migratetype]);
-	else
-		list_add_tail(&page->lru, &pcp->lists[migratetype]);
+	list_add(&page->lru, &pcp->lists[migratetype]);
 	pcp->count++;
 	if (pcp->count >= pcp->high) {
 		unsigned long batch = READ_ONCE(pcp->batch);
...
@@ -2663,25 +2659,24 @@ static void free_hot_cold_page_commit(struct page *page, unsigned long pfn,
 /*
  * Free a 0-order page
- * cold == true ? free a cold page : free a hot page
  */
-void free_hot_cold_page(struct page *page, bool cold)
+void free_unref_page(struct page *page)
 {
 	unsigned long flags;
 	unsigned long pfn = page_to_pfn(page);

-	if (!free_hot_cold_page_prepare(page, pfn))
+	if (!free_unref_page_prepare(page, pfn))
 		return;

 	local_irq_save(flags);
-	free_hot_cold_page_commit(page, pfn, cold);
+	free_unref_page_commit(page, pfn);
 	local_irq_restore(flags);
 }

 /*
  * Free a list of 0-order pages
  */
-void free_hot_cold_page_list(struct list_head *list, bool cold)
+void free_unref_page_list(struct list_head *list)
 {
 	struct page *page, *next;
 	unsigned long flags, pfn;
...
@@ -2689,7 +2684,7 @@ void free_hot_cold_page_list(struct list_head *list, bool cold)
 	/* Prepare pages for freeing */
 	list_for_each_entry_safe(page, next, list, lru) {
 		pfn = page_to_pfn(page);
-		if (!free_hot_cold_page_prepare(page, pfn))
+		if (!free_unref_page_prepare(page, pfn))
 			list_del(&page->lru);
 		set_page_private(page, pfn);
 	}
...
@@ -2699,8 +2694,8 @@ void free_hot_cold_page_list(struct list_head *list, bool cold)
 		unsigned long pfn = page_private(page);

 		set_page_private(page, 0);
-		trace_mm_page_free_batched(page, cold);
-		free_hot_cold_page_commit(page, pfn, cold);
+		trace_mm_page_free_batched(page);
+		free_unref_page_commit(page, pfn);
 	}
 	local_irq_restore(flags);
 }
...
@@ -4301,7 +4296,7 @@ void __free_pages(struct page *page, unsigned int order)
 {
 	if (put_page_testzero(page)) {
 		if (order == 0)
-			free_hot_cold_page(page, false);
+			free_unref_page(page);
 		else
 			__free_pages_ok(page, order);
 	}
...
@@ -4359,7 +4354,7 @@ void __page_frag_cache_drain(struct page *page, unsigned int count)
 		unsigned int order = compound_order(page);

 		if (order == 0)
-			free_hot_cold_page(page, false);
+			free_unref_page(page);
 		else
 			__free_pages_ok(page, order);
 	}
...
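For reference, a minimal sketch of a caller of the batched interface above
(hypothetical helper, not part of this patch): order-0 pages whose last
reference has just been dropped are collected on a private list and then
released in one pass, much as the release_pages and reclaim paths below do.

	/* Hypothetical helper, for illustration only. */
	static void drop_and_free_pages(struct page **pages, int nr)
	{
		LIST_HEAD(pages_to_free);
		int i;

		/* Collect order-0 pages whose refcount has dropped to zero. */
		for (i = 0; i < nr; i++) {
			if (put_page_testzero(pages[i]))
				list_add(&pages[i]->lru, &pages_to_free);
		}

		/* Free the whole batch via the per-cpu lists. */
		free_unref_page_list(&pages_to_free);
	}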
@@ -1321,7 +1321,7 @@ void page_remove_rmap(struct page *page, bool compound)
 	 * It would be tidy to reset the PageAnon mapping here,
 	 * but that might overwrite a racing page_add_anon_rmap
 	 * which increments mapcount after us but sets mapping
-	 * before us: so leave the reset to free_hot_cold_page,
+	 * before us: so leave the reset to free_unref_page,
 	 * and remember that it's only reliable while mapped.
 	 * Leaving it set also helps swapoff to reinstate ptes
 	 * faster for those pages still in swapcache.
...
@@ -76,7 +76,7 @@ static void __page_cache_release(struct page *page)
 static void __put_single_page(struct page *page)
 {
 	__page_cache_release(page);
-	free_hot_cold_page(page, false);
+	free_unref_page(page);
 }

 static void __put_compound_page(struct page *page)
...
@@ -817,7 +817,7 @@ void release_pages(struct page **pages, int nr)
 		spin_unlock_irqrestore(&locked_pgdat->lru_lock, flags);

 	mem_cgroup_uncharge_list(&pages_to_free);
-	free_hot_cold_page_list(&pages_to_free, 0);
+	free_unref_page_list(&pages_to_free);
 }
 EXPORT_SYMBOL(release_pages);
...
@@ -1349,7 +1349,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 	mem_cgroup_uncharge_list(&free_pages);
 	try_to_unmap_flush();
-	free_hot_cold_page_list(&free_pages, true);
+	free_unref_page_list(&free_pages);

 	list_splice(&ret_pages, page_list);
 	count_vm_events(PGACTIVATE, pgactivate);
...
@@ -1824,7 +1824,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
 	spin_unlock_irq(&pgdat->lru_lock);

 	mem_cgroup_uncharge_list(&page_list);
-	free_hot_cold_page_list(&page_list, true);
+	free_unref_page_list(&page_list);

 	/*
 	 * If reclaim is isolating dirty pages under writeback, it implies
...
@@ -2063,7 +2063,7 @@ static void shrink_active_list(unsigned long nr_to_scan,
 	spin_unlock_irq(&pgdat->lru_lock);

 	mem_cgroup_uncharge_list(&l_hold);
-	free_hot_cold_page_list(&l_hold, true);
+	free_unref_page_list(&l_hold);

 	trace_mm_vmscan_lru_shrink_active(pgdat->node_id, nr_taken, nr_activate,
 			nr_deactivate, nr_rotated, sc->priority, file);
 }
...