Commit 6c357848 authored by Matthew Wilcox (Oracle), committed by Linus Torvalds

mm: replace hpage_nr_pages with thp_nr_pages

The thp prefix is more frequently used than hpage and we should be
consistent between the various functions.

[akpm@linux-foundation.org: fix mm/migrate.c]
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: William Kucharski <william.kucharski@oracle.com>
Reviewed-by: Zi Yan <ziy@nvidia.com>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Link: http://lkml.kernel.org/r/20200629151959.15779-6-willy@infradead.org
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent af3bbc12
@@ -271,9 +271,14 @@ static inline unsigned int thp_order(struct page *page)
 	return 0;
 }
 
-static inline int hpage_nr_pages(struct page *page)
+/**
+ * thp_nr_pages - The number of regular pages in this huge page.
+ * @page: The head page of a huge page.
+ */
+static inline int thp_nr_pages(struct page *page)
 {
-	if (unlikely(PageTransHuge(page)))
+	VM_BUG_ON_PGFLAGS(PageTail(page), page);
+	if (PageHead(page))
 		return HPAGE_PMD_NR;
 	return 1;
 }
@@ -336,9 +341,9 @@ static inline unsigned int thp_order(struct page *page)
 	return 0;
 }
 
-static inline int hpage_nr_pages(struct page *page)
+static inline int thp_nr_pages(struct page *page)
 {
-	VM_BUG_ON_PAGE(PageTail(page), page);
+	VM_BUG_ON_PGFLAGS(PageTail(page), page);
 	return 1;
 }
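For orientation, the two hunks above are the whole API change: thp_nr_pages() reports how many base pages a (possibly huge) page spans. A minimal userspace sketch of the same semantics, not the kernel implementation, assuming 4KiB base pages and 2MiB PMD-sized huge pages (so HPAGE_PMD_NR == 512; both values are configuration-dependent):

	#include <stdio.h>

	/* Sketch of thp_nr_pages() semantics only; HPAGE_PMD_NR is an
	 * assumed value for a 2MiB-huge-page / 4KiB-base-page config. */
	#define HPAGE_PMD_NR 512

	static int thp_nr_pages_sketch(int page_is_thp_head)
	{
		/* A THP head page spans HPAGE_PMD_NR base pages; any other
		 * page, or any page on a !CONFIG_TRANSPARENT_HUGEPAGE build,
		 * spans exactly one. */
		return page_is_thp_head ? HPAGE_PMD_NR : 1;
	}

	int main(void)
	{
		printf("%d %d\n", thp_nr_pages_sketch(1),
		       thp_nr_pages_sketch(0));	/* prints: 512 1 */
		return 0;
	}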
......
@@ -48,14 +48,14 @@ static __always_inline void update_lru_size(struct lruvec *lruvec,
 static __always_inline void add_page_to_lru_list(struct page *page,
 				struct lruvec *lruvec, enum lru_list lru)
 {
-	update_lru_size(lruvec, lru, page_zonenum(page), hpage_nr_pages(page));
+	update_lru_size(lruvec, lru, page_zonenum(page), thp_nr_pages(page));
 	list_add(&page->lru, &lruvec->lists[lru]);
 }
 
 static __always_inline void add_page_to_lru_list_tail(struct page *page,
 				struct lruvec *lruvec, enum lru_list lru)
 {
-	update_lru_size(lruvec, lru, page_zonenum(page), hpage_nr_pages(page));
+	update_lru_size(lruvec, lru, page_zonenum(page), thp_nr_pages(page));
 	list_add_tail(&page->lru, &lruvec->lists[lru]);
 }
@@ -63,7 +63,7 @@ static __always_inline void del_page_from_lru_list(struct page *page,
 				struct lruvec *lruvec, enum lru_list lru)
 {
 	list_del(&page->lru);
-	update_lru_size(lruvec, lru, page_zonenum(page), -hpage_nr_pages(page));
+	update_lru_size(lruvec, lru, page_zonenum(page), -thp_nr_pages(page));
 }
 
 /**
......
@@ -381,7 +381,7 @@ static inline struct page *find_subpage(struct page *head, pgoff_t index)
 	if (PageHuge(head))
 		return head;
 
-	return head + (index & (hpage_nr_pages(head) - 1));
+	return head + (index & (thp_nr_pages(head) - 1));
 }
 
 struct page *find_get_entry(struct address_space *mapping, pgoff_t offset);
@@ -773,7 +773,7 @@ static inline struct page *readahead_page(struct readahead_control *rac)
 	page = xa_load(&rac->mapping->i_pages, rac->_index);
 	VM_BUG_ON_PAGE(!PageLocked(page), page);
-	rac->_batch_count = hpage_nr_pages(page);
+	rac->_batch_count = thp_nr_pages(page);
 
 	return page;
 }
@@ -796,7 +796,7 @@ static inline unsigned int __readahead_batch(struct readahead_control *rac,
 		VM_BUG_ON_PAGE(!PageLocked(page), page);
 		VM_BUG_ON_PAGE(PageTail(page), page);
 		array[i++] = page;
-		rac->_batch_count += hpage_nr_pages(page);
+		rac->_batch_count += thp_nr_pages(page);
 
 		/*
 		 * The page cache isn't using multi-index entries yet,
......
@@ -1009,7 +1009,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 		del_page_from_lru_list(page, lruvec, page_lru(page));
 		mod_node_page_state(page_pgdat(page),
 				NR_ISOLATED_ANON + page_is_file_lru(page),
-				hpage_nr_pages(page));
+				thp_nr_pages(page));
 
 isolate_success:
 		list_add(&page->lru, &cc->migratepages);
......
@@ -198,7 +198,7 @@ static void unaccount_page_cache_page(struct address_space *mapping,
 	if (PageHuge(page))
 		return;
 
-	nr = hpage_nr_pages(page);
+	nr = thp_nr_pages(page);
 
 	__mod_lruvec_page_state(page, NR_FILE_PAGES, -nr);
 	if (PageSwapBacked(page)) {
......
@@ -1637,7 +1637,7 @@ static long check_and_migrate_cma_pages(struct mm_struct *mm,
 				mod_node_page_state(page_pgdat(head),
 						    NR_ISOLATED_ANON +
 						    page_is_file_lru(head),
-						    hpage_nr_pages(head));
+						    thp_nr_pages(head));
 			}
 		}
 	}
......
@@ -369,7 +369,7 @@ extern void clear_page_mlock(struct page *page);
 static inline void mlock_migrate_page(struct page *newpage, struct page *page)
 {
 	if (TestClearPageMlocked(page)) {
-		int nr_pages = hpage_nr_pages(page);
+		int nr_pages = thp_nr_pages(page);
 
 		/* Holding pmd lock, no change in irq context: __mod is safe */
 		__mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);
......
@@ -5589,7 +5589,7 @@ static int mem_cgroup_move_account(struct page *page,
 {
 	struct lruvec *from_vec, *to_vec;
 	struct pglist_data *pgdat;
-	unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;
+	unsigned int nr_pages = compound ? thp_nr_pages(page) : 1;
 	int ret;
 
 	VM_BUG_ON(from == to);
@@ -6682,7 +6682,7 @@ void mem_cgroup_calculate_protection(struct mem_cgroup *root,
  */
 int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask)
 {
-	unsigned int nr_pages = hpage_nr_pages(page);
+	unsigned int nr_pages = thp_nr_pages(page);
 	struct mem_cgroup *memcg = NULL;
 	int ret = 0;
@@ -6912,7 +6912,7 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage)
 		return;
 
 	/* Force-charge the new page. The old one will be freed soon */
-	nr_pages = hpage_nr_pages(newpage);
+	nr_pages = thp_nr_pages(newpage);
 
 	page_counter_charge(&memcg->memory, nr_pages);
 	if (do_memsw_account())
@@ -7114,7 +7114,7 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
 	 * ancestor for the swap instead and transfer the memory+swap charge.
 	 */
 	swap_memcg = mem_cgroup_id_get_online(memcg);
-	nr_entries = hpage_nr_pages(page);
+	nr_entries = thp_nr_pages(page);
 	/* Get references for the tail pages, too */
 	if (nr_entries > 1)
 		mem_cgroup_id_get_many(swap_memcg, nr_entries - 1);
@@ -7158,7 +7158,7 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
  */
 int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry)
 {
-	unsigned int nr_pages = hpage_nr_pages(page);
+	unsigned int nr_pages = thp_nr_pages(page);
 	struct page_counter *counter;
 	struct mem_cgroup *memcg;
 	unsigned short oldid;
......
@@ -1299,7 +1299,7 @@ static int
 do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
 {
 	unsigned long pfn;
-	struct page *page;
+	struct page *page, *head;
 	int ret = 0;
 	LIST_HEAD(source);
@@ -1307,15 +1307,14 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
 		if (!pfn_valid(pfn))
 			continue;
 		page = pfn_to_page(pfn);
+		head = compound_head(page);
 
 		if (PageHuge(page)) {
-			struct page *head = compound_head(page);
 			pfn = page_to_pfn(head) + compound_nr(head) - 1;
 			isolate_huge_page(head, &source);
 			continue;
 		} else if (PageTransHuge(page))
-			pfn = page_to_pfn(compound_head(page))
-				+ hpage_nr_pages(page) - 1;
+			pfn = page_to_pfn(head) + thp_nr_pages(page) - 1;
 
 		/*
 		 * HWPoison pages have elevated reference counts so the migration would
......
@@ -1049,7 +1049,7 @@ static int migrate_page_add(struct page *page, struct list_head *pagelist,
 			list_add_tail(&head->lru, pagelist);
 			mod_node_page_state(page_pgdat(head),
 				NR_ISOLATED_ANON + page_is_file_lru(head),
-				hpage_nr_pages(head));
+				thp_nr_pages(head));
 		} else if (flags & MPOL_MF_STRICT) {
 			/*
 			 * Non-movable page may reach here. And, there may be
......
@@ -193,7 +193,7 @@ void putback_movable_pages(struct list_head *l)
 			put_page(page);
 		} else {
 			mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
-					page_is_file_lru(page), -hpage_nr_pages(page));
+					page_is_file_lru(page), -thp_nr_pages(page));
 			putback_lru_page(page);
 		}
 	}
@@ -386,7 +386,7 @@ static int expected_page_refs(struct address_space *mapping, struct page *page)
 	 */
 	expected_count += is_device_private_page(page);
 	if (mapping)
-		expected_count += hpage_nr_pages(page) + page_has_private(page);
+		expected_count += thp_nr_pages(page) + page_has_private(page);
 
 	return expected_count;
 }
@@ -441,7 +441,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
 	 */
 	newpage->index = page->index;
 	newpage->mapping = page->mapping;
-	page_ref_add(newpage, hpage_nr_pages(page));	/* add cache reference */
+	page_ref_add(newpage, thp_nr_pages(page));	/* add cache reference */
 	if (PageSwapBacked(page)) {
 		__SetPageSwapBacked(newpage);
 		if (PageSwapCache(page)) {
@@ -474,7 +474,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
 	 * to one less reference.
 	 * We know this isn't the last reference.
 	 */
-	page_ref_unfreeze(page, expected_count - hpage_nr_pages(page));
+	page_ref_unfreeze(page, expected_count - thp_nr_pages(page));
 
 	xas_unlock(&xas);
 	/* Leave irq disabled to prevent preemption while updating stats */
@@ -591,7 +591,7 @@ static void copy_huge_page(struct page *dst, struct page *src)
 	} else {
 		/* thp page */
 		BUG_ON(!PageTransHuge(src));
-		nr_pages = hpage_nr_pages(src);
+		nr_pages = thp_nr_pages(src);
 	}
 
 	for (i = 0; i < nr_pages; i++) {
@@ -1213,7 +1213,7 @@ static int unmap_and_move(new_page_t get_new_page,
 		 */
 		if (likely(!__PageMovable(page)))
 			mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
-					page_is_file_lru(page), -hpage_nr_pages(page));
+					page_is_file_lru(page), -thp_nr_pages(page));
 	}
 
 	/*
@@ -1446,7 +1446,7 @@ int migrate_pages(struct list_head *from, new_page_t get_new_page,
 		 * during migration.
 		 */
 		is_thp = PageTransHuge(page);
-		nr_subpages = hpage_nr_pages(page);
+		nr_subpages = thp_nr_pages(page);
 		cond_resched();
 
 		if (PageHuge(page))
@@ -1670,7 +1670,7 @@ static int add_page_for_migration(struct mm_struct *mm, unsigned long addr,
 		list_add_tail(&head->lru, pagelist);
 		mod_node_page_state(page_pgdat(head),
 			NR_ISOLATED_ANON + page_is_file_lru(head),
-			hpage_nr_pages(head));
+			thp_nr_pages(head));
 	}
 out_putpage:
 	/*
@@ -2034,7 +2034,7 @@ static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
 	page_lru = page_is_file_lru(page);
 	mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + page_lru,
-				hpage_nr_pages(page));
+				thp_nr_pages(page));
 
 	/*
 	 * Isolating the page has taken another reference, so the
......
@@ -61,8 +61,7 @@ void clear_page_mlock(struct page *page)
 	if (!TestClearPageMlocked(page))
 		return;
 
-	mod_zone_page_state(page_zone(page), NR_MLOCK,
-			    -hpage_nr_pages(page));
+	mod_zone_page_state(page_zone(page), NR_MLOCK, -thp_nr_pages(page));
 	count_vm_event(UNEVICTABLE_PGCLEARED);
 	/*
 	 * The previous TestClearPageMlocked() corresponds to the smp_mb()
@@ -95,7 +94,7 @@ void mlock_vma_page(struct page *page)
 	if (!TestSetPageMlocked(page)) {
 		mod_zone_page_state(page_zone(page), NR_MLOCK,
-				    hpage_nr_pages(page));
+				    thp_nr_pages(page));
 		count_vm_event(UNEVICTABLE_PGMLOCKED);
 		if (!isolate_lru_page(page))
 			putback_lru_page(page);
@@ -192,7 +191,7 @@ unsigned int munlock_vma_page(struct page *page)
 	/*
 	 * Serialize with any parallel __split_huge_page_refcount() which
 	 * might otherwise copy PageMlocked to part of the tail pages before
-	 * we clear it in the head page. It also stabilizes hpage_nr_pages().
+	 * we clear it in the head page. It also stabilizes thp_nr_pages().
 	 */
 	spin_lock_irq(&pgdat->lru_lock);
@@ -202,7 +201,7 @@ unsigned int munlock_vma_page(struct page *page)
 		goto unlock_out;
 	}
 
-	nr_pages = hpage_nr_pages(page);
+	nr_pages = thp_nr_pages(page);
 	__mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);
 
 	if (__munlock_isolate_lru_page(page, true)) {
......
@@ -274,7 +274,7 @@ static inline void count_swpout_vm_event(struct page *page)
 	if (unlikely(PageTransHuge(page)))
 		count_vm_event(THP_SWPOUT);
 #endif
-	count_vm_events(PSWPOUT, hpage_nr_pages(page));
+	count_vm_events(PSWPOUT, thp_nr_pages(page));
 }
 
 #if defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP)
......
@@ -61,7 +61,7 @@ static inline bool pfn_is_match(struct page *page, unsigned long pfn)
 		return page_pfn == pfn;
 
 	/* THP can be referenced by any subpage */
-	return pfn >= page_pfn && pfn - page_pfn < hpage_nr_pages(page);
+	return pfn >= page_pfn && pfn - page_pfn < thp_nr_pages(page);
 }
 
 /**
......
@@ -1130,7 +1130,7 @@ void do_page_add_anon_rmap(struct page *page,
 	}
 
 	if (first) {
-		int nr = compound ? hpage_nr_pages(page) : 1;
+		int nr = compound ? thp_nr_pages(page) : 1;
 		/*
 		 * We use the irq-unsafe __{inc|mod}_zone_page_stat because
 		 * these counters are not modified in interrupt context, and
@@ -1169,7 +1169,7 @@ void do_page_add_anon_rmap(struct page *page,
 void page_add_new_anon_rmap(struct page *page,
 	struct vm_area_struct *vma, unsigned long address, bool compound)
 {
-	int nr = compound ? hpage_nr_pages(page) : 1;
+	int nr = compound ? thp_nr_pages(page) : 1;
 
 	VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
 	__SetPageSwapBacked(page);
@@ -1860,7 +1860,7 @@ static void rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc,
 		return;
 
 	pgoff_start = page_to_pgoff(page);
-	pgoff_end = pgoff_start + hpage_nr_pages(page) - 1;
+	pgoff_end = pgoff_start + thp_nr_pages(page) - 1;
 	anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root,
 			pgoff_start, pgoff_end) {
 		struct vm_area_struct *vma = avc->vma;
@@ -1913,7 +1913,7 @@ static void rmap_walk_file(struct page *page, struct rmap_walk_control *rwc,
 		return;
 
 	pgoff_start = page_to_pgoff(page);
-	pgoff_end = pgoff_start + hpage_nr_pages(page) - 1;
+	pgoff_end = pgoff_start + thp_nr_pages(page) - 1;
 	if (!locked)
 		i_mmap_lock_read(mapping);
 	vma_interval_tree_foreach(vma, &mapping->i_mmap,
......
@@ -241,7 +241,7 @@ static void pagevec_move_tail_fn(struct page *page, struct lruvec *lruvec,
 		del_page_from_lru_list(page, lruvec, page_lru(page));
 		ClearPageActive(page);
 		add_page_to_lru_list_tail(page, lruvec, page_lru(page));
-		(*pgmoved) += hpage_nr_pages(page);
+		(*pgmoved) += thp_nr_pages(page);
 	}
 }
@@ -312,7 +312,7 @@ void lru_note_cost(struct lruvec *lruvec, bool file, unsigned int nr_pages)
 void lru_note_cost_page(struct page *page)
 {
 	lru_note_cost(mem_cgroup_page_lruvec(page, page_pgdat(page)),
-		      page_is_file_lru(page), hpage_nr_pages(page));
+		      page_is_file_lru(page), thp_nr_pages(page));
 }
 
 static void __activate_page(struct page *page, struct lruvec *lruvec,
@@ -320,7 +320,7 @@ static void __activate_page(struct page *page, struct lruvec *lruvec,
 {
 	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
 		int lru = page_lru_base_type(page);
-		int nr_pages = hpage_nr_pages(page);
+		int nr_pages = thp_nr_pages(page);
 
 		del_page_from_lru_list(page, lruvec, lru);
 		SetPageActive(page);
@@ -500,7 +500,7 @@ void lru_cache_add_inactive_or_unevictable(struct page *page,
 		 * lock is held(spinlock), which implies preemption disabled.
 		 */
 		__mod_zone_page_state(page_zone(page), NR_MLOCK,
-				    hpage_nr_pages(page));
+				    thp_nr_pages(page));
 		count_vm_event(UNEVICTABLE_PGMLOCKED);
 	}
 	lru_cache_add(page);
@@ -532,7 +532,7 @@ static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec,
 {
 	int lru;
 	bool active;
-	int nr_pages = hpage_nr_pages(page);
+	int nr_pages = thp_nr_pages(page);
 
 	if (!PageLRU(page))
 		return;
@@ -580,7 +580,7 @@ static void lru_deactivate_fn(struct page *page, struct lruvec *lruvec,
 {
 	if (PageLRU(page) && PageActive(page) && !PageUnevictable(page)) {
 		int lru = page_lru_base_type(page);
-		int nr_pages = hpage_nr_pages(page);
+		int nr_pages = thp_nr_pages(page);
 
 		del_page_from_lru_list(page, lruvec, lru + LRU_ACTIVE);
 		ClearPageActive(page);
@@ -599,7 +599,7 @@ static void lru_lazyfree_fn(struct page *page, struct lruvec *lruvec,
 	if (PageLRU(page) && PageAnon(page) && PageSwapBacked(page) &&
 	    !PageSwapCache(page) && !PageUnevictable(page)) {
 		bool active = PageActive(page);
-		int nr_pages = hpage_nr_pages(page);
+		int nr_pages = thp_nr_pages(page);
 
 		del_page_from_lru_list(page, lruvec,
 				       LRU_INACTIVE_ANON + active);
@@ -972,7 +972,7 @@ static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec,
 {
 	enum lru_list lru;
 	int was_unevictable = TestClearPageUnevictable(page);
-	int nr_pages = hpage_nr_pages(page);
+	int nr_pages = thp_nr_pages(page);
 
 	VM_BUG_ON_PAGE(PageLRU(page), page);
......
@@ -130,7 +130,7 @@ int add_to_swap_cache(struct page *page, swp_entry_t entry,
 	struct address_space *address_space = swap_address_space(entry);
 	pgoff_t idx = swp_offset(entry);
 	XA_STATE_ORDER(xas, &address_space->i_pages, idx, compound_order(page));
-	unsigned long i, nr = hpage_nr_pages(page);
+	unsigned long i, nr = thp_nr_pages(page);
 	void *old;
 
 	VM_BUG_ON_PAGE(!PageLocked(page), page);
@@ -183,7 +183,7 @@ void __delete_from_swap_cache(struct page *page,
 			swp_entry_t entry, void *shadow)
 {
 	struct address_space *address_space = swap_address_space(entry);
-	int i, nr = hpage_nr_pages(page);
+	int i, nr = thp_nr_pages(page);
 	pgoff_t idx = swp_offset(entry);
 	XA_STATE(xas, &address_space->i_pages, idx);
@@ -278,7 +278,7 @@ void delete_from_swap_cache(struct page *page)
 	xa_unlock_irq(&address_space->i_pages);
 
 	put_swap_page(page, entry);
-	page_ref_sub(page, hpage_nr_pages(page));
+	page_ref_sub(page, thp_nr_pages(page));
 }
 
 void clear_shadow_from_swap_cache(int type, unsigned long begin,
......
@@ -1370,7 +1370,7 @@ void put_swap_page(struct page *page, swp_entry_t entry)
 	unsigned char *map;
 	unsigned int i, free_entries = 0;
 	unsigned char val;
-	int size = swap_entry_size(hpage_nr_pages(page));
+	int size = swap_entry_size(thp_nr_pages(page));
 
 	si = _swap_info_get(entry);
 	if (!si)
......
@@ -1354,7 +1354,7 @@ static unsigned int shrink_page_list(struct list_head *page_list,
 			case PAGE_ACTIVATE:
 				goto activate_locked;
 			case PAGE_SUCCESS:
-				stat->nr_pageout += hpage_nr_pages(page);
+				stat->nr_pageout += thp_nr_pages(page);
 
 				if (PageWriteback(page))
 					goto keep;
@@ -1862,7 +1862,7 @@ static unsigned noinline_for_stack move_pages_to_lru(struct lruvec *lruvec,
 		SetPageLRU(page);
 		lru = page_lru(page);
 
-		nr_pages = hpage_nr_pages(page);
+		nr_pages = thp_nr_pages(page);
 		update_lru_size(lruvec, lru, page_zonenum(page), nr_pages);
 		list_move(&page->lru, &lruvec->lists[lru]);
@@ -2065,7 +2065,7 @@ static void shrink_active_list(unsigned long nr_to_scan,
 		 * so we ignore them here.
 		 */
 		if ((vm_flags & VM_EXEC) && page_is_file_lru(page)) {
-			nr_rotated += hpage_nr_pages(page);
+			nr_rotated += thp_nr_pages(page);
 			list_add(&page->lru, &l_active);
 			continue;
 		}
......
@@ -263,7 +263,7 @@ void *workingset_eviction(struct page *page, struct mem_cgroup *target_memcg)
 	VM_BUG_ON_PAGE(!PageLocked(page), page);
 
 	lruvec = mem_cgroup_lruvec(target_memcg, pgdat);
-	workingset_age_nonresident(lruvec, hpage_nr_pages(page));
+	workingset_age_nonresident(lruvec, thp_nr_pages(page));
 	/* XXX: target_memcg can be NULL, go through lruvec */
 	memcgid = mem_cgroup_id(lruvec_memcg(lruvec));
 	eviction = atomic_long_read(&lruvec->nonresident_age);
@@ -374,7 +374,7 @@ void workingset_refault(struct page *page, void *shadow)
 		goto out;
 
 	SetPageActive(page);
-	workingset_age_nonresident(lruvec, hpage_nr_pages(page));
+	workingset_age_nonresident(lruvec, thp_nr_pages(page));
 	inc_lruvec_state(lruvec, WORKINGSET_ACTIVATE_BASE + file);
 
 	/* Page was active prior to eviction */
@@ -411,7 +411,7 @@ void workingset_activation(struct page *page)
 	if (!mem_cgroup_disabled() && !memcg)
 		goto out;
 	lruvec = mem_cgroup_page_lruvec(page, page_pgdat(page));
-	workingset_age_nonresident(lruvec, hpage_nr_pages(page));
+	workingset_age_nonresident(lruvec, thp_nr_pages(page));
 out:
 	rcu_read_unlock();
 }
......