Commit a9d5adee authored by Jérôme Glisse, committed by Linus Torvalds

mm/memcontrol: allow to uncharge page without using page->lru field

HMM pages (private or public device pages) are ZONE_DEVICE pages, and
thus the page->lru field of those pages cannot be used. This patch
rearranges the uncharge path so that a single page can be uncharged
without modifying the lru field of its struct page.

There is no change to the memcontrol logic; it behaves exactly as it
did before this patch.
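
For illustration, after this patch the single-page path (shown in full in
the diff below) gathers the accounting into an on-stack struct
uncharge_gather and flushes it in one batch, so page->lru is never
initialized or walked:

void mem_cgroup_uncharge(struct page *page)
{
	struct uncharge_gather ug;

	if (mem_cgroup_disabled())
		return;

	/* page was never charged to a memcg, nothing to uncharge */
	if (!page->mem_cgroup)
		return;

	uncharge_gather_clear(&ug);	/* zero the on-stack gather state */
	uncharge_page(page, &ug);	/* account this single page into ug */
	uncharge_batch(&ug);		/* flush the gathered counts for ug.memcg */
}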

Link: http://lkml.kernel.org/r/20170817000548.32038-10-jglisse@redhat.com
Signed-off-by: Jérôme Glisse <jglisse@redhat.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Vladimir Davydov <vdavydov.dev@gmail.com>
Cc: Aneesh Kumar <aneesh.kumar@linux.vnet.ibm.com>
Cc: Balbir Singh <bsingharora@gmail.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: David Nellans <dnellans@nvidia.com>
Cc: Evgeny Baskakov <ebaskakov@nvidia.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Mark Hairgrove <mhairgrove@nvidia.com>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Ross Zwisler <ross.zwisler@linux.intel.com>
Cc: Sherry Cheung <SCheung@nvidia.com>
Cc: Subhash Gutti <sgutti@nvidia.com>
Cc: Bob Liu <liubo95@huawei.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 7b2d55d2
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -5540,48 +5540,102 @@ void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg,
 	cancel_charge(memcg, nr_pages);
 }
 
-static void uncharge_batch(struct mem_cgroup *memcg, unsigned long pgpgout,
-			   unsigned long nr_anon, unsigned long nr_file,
-			   unsigned long nr_kmem, unsigned long nr_huge,
-			   unsigned long nr_shmem, struct page *dummy_page)
+struct uncharge_gather {
+	struct mem_cgroup *memcg;
+	unsigned long pgpgout;
+	unsigned long nr_anon;
+	unsigned long nr_file;
+	unsigned long nr_kmem;
+	unsigned long nr_huge;
+	unsigned long nr_shmem;
+	struct page *dummy_page;
+};
+
+static inline void uncharge_gather_clear(struct uncharge_gather *ug)
 {
-	unsigned long nr_pages = nr_anon + nr_file + nr_kmem;
+	memset(ug, 0, sizeof(*ug));
+}
+
+static void uncharge_batch(const struct uncharge_gather *ug)
+{
+	unsigned long nr_pages = ug->nr_anon + ug->nr_file + ug->nr_kmem;
 	unsigned long flags;
 
-	if (!mem_cgroup_is_root(memcg)) {
-		page_counter_uncharge(&memcg->memory, nr_pages);
+	if (!mem_cgroup_is_root(ug->memcg)) {
+		page_counter_uncharge(&ug->memcg->memory, nr_pages);
 		if (do_memsw_account())
-			page_counter_uncharge(&memcg->memsw, nr_pages);
-		if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && nr_kmem)
-			page_counter_uncharge(&memcg->kmem, nr_kmem);
-		memcg_oom_recover(memcg);
+			page_counter_uncharge(&ug->memcg->memsw, nr_pages);
+		if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && ug->nr_kmem)
+			page_counter_uncharge(&ug->memcg->kmem, ug->nr_kmem);
+		memcg_oom_recover(ug->memcg);
 	}
 
 	local_irq_save(flags);
-	__this_cpu_sub(memcg->stat->count[MEMCG_RSS], nr_anon);
-	__this_cpu_sub(memcg->stat->count[MEMCG_CACHE], nr_file);
-	__this_cpu_sub(memcg->stat->count[MEMCG_RSS_HUGE], nr_huge);
-	__this_cpu_sub(memcg->stat->count[NR_SHMEM], nr_shmem);
-	__this_cpu_add(memcg->stat->events[PGPGOUT], pgpgout);
-	__this_cpu_add(memcg->stat->nr_page_events, nr_pages);
-	memcg_check_events(memcg, dummy_page);
+	__this_cpu_sub(ug->memcg->stat->count[MEMCG_RSS], ug->nr_anon);
+	__this_cpu_sub(ug->memcg->stat->count[MEMCG_CACHE], ug->nr_file);
+	__this_cpu_sub(ug->memcg->stat->count[MEMCG_RSS_HUGE], ug->nr_huge);
+	__this_cpu_sub(ug->memcg->stat->count[NR_SHMEM], ug->nr_shmem);
+	__this_cpu_add(ug->memcg->stat->events[PGPGOUT], ug->pgpgout);
+	__this_cpu_add(ug->memcg->stat->nr_page_events, nr_pages);
+	memcg_check_events(ug->memcg, ug->dummy_page);
 	local_irq_restore(flags);
 
-	if (!mem_cgroup_is_root(memcg))
-		css_put_many(&memcg->css, nr_pages);
+	if (!mem_cgroup_is_root(ug->memcg))
+		css_put_many(&ug->memcg->css, nr_pages);
+}
+
+static void uncharge_page(struct page *page, struct uncharge_gather *ug)
+{
+	VM_BUG_ON_PAGE(PageLRU(page), page);
+	VM_BUG_ON_PAGE(!PageHWPoison(page) && page_count(page), page);
+
+	if (!page->mem_cgroup)
+		return;
+
+	/*
+	 * Nobody should be changing or seriously looking at
+	 * page->mem_cgroup at this point, we have fully
+	 * exclusive access to the page.
+	 */
+
+	if (ug->memcg != page->mem_cgroup) {
+		if (ug->memcg) {
+			uncharge_batch(ug);
+			uncharge_gather_clear(ug);
+		}
+		ug->memcg = page->mem_cgroup;
+	}
+
+	if (!PageKmemcg(page)) {
+		unsigned int nr_pages = 1;
+
+		if (PageTransHuge(page)) {
+			nr_pages <<= compound_order(page);
+			ug->nr_huge += nr_pages;
+		}
+		if (PageAnon(page))
+			ug->nr_anon += nr_pages;
+		else {
+			ug->nr_file += nr_pages;
+			if (PageSwapBacked(page))
+				ug->nr_shmem += nr_pages;
+		}
+		ug->pgpgout++;
+	} else {
+		ug->nr_kmem += 1 << compound_order(page);
+		__ClearPageKmemcg(page);
+	}
+
+	ug->dummy_page = page;
+	page->mem_cgroup = NULL;
 }
 
 static void uncharge_list(struct list_head *page_list)
 {
-	struct mem_cgroup *memcg = NULL;
-	unsigned long nr_shmem = 0;
-	unsigned long nr_anon = 0;
-	unsigned long nr_file = 0;
-	unsigned long nr_huge = 0;
-	unsigned long nr_kmem = 0;
-	unsigned long pgpgout = 0;
+	struct uncharge_gather ug;
 	struct list_head *next;
-	struct page *page;
+
+	uncharge_gather_clear(&ug);
 
 	/*
 	 * Note that the list can be a single page->lru; hence the
@@ -5589,57 +5643,16 @@ static void uncharge_list(struct list_head *page_list)
 	 */
 	next = page_list->next;
 	do {
+		struct page *page;
+
 		page = list_entry(next, struct page, lru);
 		next = page->lru.next;
 
-		VM_BUG_ON_PAGE(PageLRU(page), page);
-		VM_BUG_ON_PAGE(!PageHWPoison(page) && page_count(page), page);
-
-		if (!page->mem_cgroup)
-			continue;
-
-		/*
-		 * Nobody should be changing or seriously looking at
-		 * page->mem_cgroup at this point, we have fully
-		 * exclusive access to the page.
-		 */
-
-		if (memcg != page->mem_cgroup) {
-			if (memcg) {
-				uncharge_batch(memcg, pgpgout, nr_anon, nr_file,
-					       nr_kmem, nr_huge, nr_shmem, page);
-				pgpgout = nr_anon = nr_file = nr_kmem = 0;
-				nr_huge = nr_shmem = 0;
-			}
-			memcg = page->mem_cgroup;
-		}
-
-		if (!PageKmemcg(page)) {
-			unsigned int nr_pages = 1;
-
-			if (PageTransHuge(page)) {
-				nr_pages <<= compound_order(page);
-				nr_huge += nr_pages;
-			}
-			if (PageAnon(page))
-				nr_anon += nr_pages;
-			else {
-				nr_file += nr_pages;
-				if (PageSwapBacked(page))
-					nr_shmem += nr_pages;
-			}
-			pgpgout++;
-		} else {
-			nr_kmem += 1 << compound_order(page);
-			__ClearPageKmemcg(page);
-		}
-
-		page->mem_cgroup = NULL;
+		uncharge_page(page, &ug);
 	} while (next != page_list);
 
-	if (memcg)
-		uncharge_batch(memcg, pgpgout, nr_anon, nr_file,
-			       nr_kmem, nr_huge, nr_shmem, page);
+	if (ug.memcg)
+		uncharge_batch(&ug);
 }
 
 /**
@@ -5651,6 +5664,8 @@ static void uncharge_list(struct list_head *page_list)
  */
 void mem_cgroup_uncharge(struct page *page)
 {
+	struct uncharge_gather ug;
+
 	if (mem_cgroup_disabled())
 		return;
 
@@ -5658,8 +5673,9 @@ void mem_cgroup_uncharge(struct page *page)
 	if (!page->mem_cgroup)
 		return;
 
-	INIT_LIST_HEAD(&page->lru);
-	uncharge_list(&page->lru);
+	uncharge_gather_clear(&ug);
+	uncharge_page(page, &ug);
+	uncharge_batch(&ug);
 }
 
 /**