Commit f0e45fb4 authored by Johannes Weiner, committed by Linus Torvalds

mm: memcontrol: drop unused try/commit/cancel charge API

There are no more users. RIP in peace.

[arnd@arndb.de: fix an unused-function warning]
  Link: http://lkml.kernel.org/r/20200528095640.151454-1-arnd@arndb.de
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Alex Shi <alex.shi@linux.alibaba.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: "Kirill A. Shutemov" <kirill@shutemov.name>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Roman Gushchin <guro@fb.com>
Cc: Shakeel Butt <shakeelb@google.com>
Cc: Balbir Singh <bsingharora@gmail.com>
Link: http://lkml.kernel.org/r/20200508183105.225460-14-hannes@cmpxchg.org
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 9d82c694
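
For context before the diff: callers of the old API had to drive a three-step transaction, while the surviving mem_cgroup_charge() charges and commits in a single call. The following is a simplified, hypothetical caller-side sketch of that conversion; the oom label and the elided fault-path code are illustrative, not quoted from any kernel file:

    /*
     * Hypothetical caller, before: try, then commit once page->mapping
     * is set up, or cancel if page instantiation fails.
     */
    if (mem_cgroup_try_charge(page, mm, GFP_KERNEL, &memcg))
            goto oom;
    /* ... instantiate the page, set up page->mapping, map it ... */
    mem_cgroup_commit_charge(page, memcg, false);
    /* ... or, if instantiation failed along the way: */
    mem_cgroup_cancel_charge(page, memcg);

    /*
     * Same caller, after: one call charges and commits; an error
     * return leaves nothing for the caller to unwind.
     */
    if (mem_cgroup_charge(page, mm, GFP_KERNEL, false))
            goto oom;
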
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -355,14 +355,6 @@ static inline unsigned long mem_cgroup_protection(struct mem_cgroup *memcg,
 enum mem_cgroup_protection mem_cgroup_protected(struct mem_cgroup *root,
 						struct mem_cgroup *memcg);
 
-int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
-			  gfp_t gfp_mask, struct mem_cgroup **memcgp);
-int mem_cgroup_try_charge_delay(struct page *page, struct mm_struct *mm,
-			  gfp_t gfp_mask, struct mem_cgroup **memcgp);
-void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
-			      bool lrucare);
-void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg);
-
 int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask,
 		      bool lrucare);
@@ -846,34 +838,6 @@ static inline enum mem_cgroup_protection mem_cgroup_protected(
 	return MEMCG_PROT_NONE;
 }
 
-static inline int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
-					gfp_t gfp_mask,
-					struct mem_cgroup **memcgp)
-{
-	*memcgp = NULL;
-	return 0;
-}
-
-static inline int mem_cgroup_try_charge_delay(struct page *page,
-					      struct mm_struct *mm,
-					      gfp_t gfp_mask,
-					      struct mem_cgroup **memcgp)
-{
-	*memcgp = NULL;
-	return 0;
-}
-
-static inline void mem_cgroup_commit_charge(struct page *page,
-					    struct mem_cgroup *memcg,
-					    bool lrucare)
-{
-}
-
-static inline void mem_cgroup_cancel_charge(struct page *page,
-					    struct mem_cgroup *memcg)
-{
-}
-
 static inline int mem_cgroup_charge(struct page *page, struct mm_struct *mm,
 				    gfp_t gfp_mask, bool lrucare)
 {
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2641,6 +2641,7 @@ static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
 	return 0;
 }
 
+#if defined(CONFIG_MEMCG_KMEM) || defined(CONFIG_MMU)
 static void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages)
 {
 	if (mem_cgroup_is_root(memcg))
@@ -2652,6 +2653,7 @@ static void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages)
 
 	css_put_many(&memcg->css, nr_pages);
 }
+#endif
 
 static void lock_page_lru(struct page *page, int *isolated)
 {
@@ -6499,29 +6501,26 @@ enum mem_cgroup_protection mem_cgroup_protected(struct mem_cgroup *root,
 }
 
 /**
- * mem_cgroup_try_charge - try charging a page
+ * mem_cgroup_charge - charge a newly allocated page to a cgroup
  * @page: page to charge
  * @mm: mm context of the victim
  * @gfp_mask: reclaim mode
- * @memcgp: charged memcg return
+ * @lrucare: page might be on the LRU already
  *
  * Try to charge @page to the memcg that @mm belongs to, reclaiming
  * pages according to @gfp_mask if necessary.
  *
- * Returns 0 on success, with *@memcgp pointing to the charged memcg.
- * Otherwise, an error code is returned.
- *
- * After page->mapping has been set up, the caller must finalize the
- * charge with mem_cgroup_commit_charge(). Or abort the transaction
- * with mem_cgroup_cancel_charge() in case page instantiation fails.
+ * Returns 0 on success. Otherwise, an error code is returned.
  */
-int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
-			  gfp_t gfp_mask, struct mem_cgroup **memcgp)
+int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask,
+		      bool lrucare)
 {
 	unsigned int nr_pages = hpage_nr_pages(page);
 	struct mem_cgroup *memcg = NULL;
 	int ret = 0;
 
+	VM_BUG_ON_PAGE(PageLRU(page) && !lrucare, page);
+
 	if (mem_cgroup_disabled())
 		goto out;
@@ -6553,56 +6552,8 @@ int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
 		memcg = get_mem_cgroup_from_mm(mm);
 
 	ret = try_charge(memcg, gfp_mask, nr_pages);
 	if (ret)
-		css_put(&memcg->css);
-out:
-	*memcgp = memcg;
-	return ret;
-}
-
-int mem_cgroup_try_charge_delay(struct page *page, struct mm_struct *mm,
-			  gfp_t gfp_mask, struct mem_cgroup **memcgp)
-{
-	int ret;
-
-	ret = mem_cgroup_try_charge(page, mm, gfp_mask, memcgp);
-	if (*memcgp)
-		cgroup_throttle_swaprate(page, gfp_mask);
-	return ret;
-}
-
-/**
- * mem_cgroup_commit_charge - commit a page charge
- * @page: page to charge
- * @memcg: memcg to charge the page to
- * @lrucare: page might be on LRU already
- *
- * Finalize a charge transaction started by mem_cgroup_try_charge(),
- * after page->mapping has been set up.  This must happen atomically
- * as part of the page instantiation, i.e. under the page table lock
- * for anonymous pages, under the page lock for page and swap cache.
- *
- * In addition, the page must not be on the LRU during the commit, to
- * prevent racing with task migration.  If it might be, use @lrucare.
- *
- * Use mem_cgroup_cancel_charge() to cancel the transaction instead.
- */
-void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
-			      bool lrucare)
-{
-	unsigned int nr_pages = hpage_nr_pages(page);
-
-	VM_BUG_ON_PAGE(PageLRU(page) && !lrucare, page);
-
-	if (mem_cgroup_disabled())
-		return;
-	/*
-	 * Swap faults will attempt to charge the same page multiple
-	 * times.  But reuse_swap_page() might have removed the page
-	 * from swapcache already, so we can't check PageSwapCache().
-	 */
-	if (!memcg)
-		return;
+		goto out_put;
 
 	commit_charge(page, memcg, lrucare);
@@ -6620,55 +6571,11 @@ void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
 		 */
 		mem_cgroup_uncharge_swap(entry, nr_pages);
 	}
-}
-
-/**
- * mem_cgroup_cancel_charge - cancel a page charge
- * @page: page to charge
- * @memcg: memcg to charge the page to
- *
- * Cancel a charge transaction started by mem_cgroup_try_charge().
- */
-void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg)
-{
-	unsigned int nr_pages = hpage_nr_pages(page);
-
-	if (mem_cgroup_disabled())
-		return;
-	/*
-	 * Swap faults will attempt to charge the same page multiple
-	 * times.  But reuse_swap_page() might have removed the page
-	 * from swapcache already, so we can't check PageSwapCache().
-	 */
-	if (!memcg)
-		return;
-
-	cancel_charge(memcg, nr_pages);
-}
-
-/**
- * mem_cgroup_charge - charge a newly allocated page to a cgroup
- * @page: page to charge
- * @mm: mm context of the victim
- * @gfp_mask: reclaim mode
- * @lrucare: page might be on the LRU already
- *
- * Try to charge @page to the memcg that @mm belongs to, reclaiming
- * pages according to @gfp_mask if necessary.
- *
- * Returns 0 on success. Otherwise, an error code is returned.
- */
-int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask,
-		      bool lrucare)
-{
-	struct mem_cgroup *memcg;
-	int ret;
-
-	ret = mem_cgroup_try_charge(page, mm, gfp_mask, &memcg);
-	if (ret)
-		return ret;
-	mem_cgroup_commit_charge(page, memcg, lrucare);
-	return 0;
+out_put:
+	css_put(&memcg->css);
+out:
+	return ret;
 }
 
 struct uncharge_gather {
@@ -6773,8 +6680,7 @@ static void uncharge_list(struct list_head *page_list)
  * mem_cgroup_uncharge - uncharge a page
  * @page: page to uncharge
  *
- * Uncharge a page previously charged with mem_cgroup_try_charge() and
- * mem_cgroup_commit_charge().
+ * Uncharge a page previously charged with mem_cgroup_charge().
  */
 void mem_cgroup_uncharge(struct page *page)
 {
@@ -6797,7 +6703,7 @@ void mem_cgroup_uncharge(struct page *page)
  * @page_list: list of pages to uncharge
  *
  * Uncharge a list of pages previously charged with
- * mem_cgroup_try_charge() and mem_cgroup_commit_charge().
+ * mem_cgroup_charge().
  */
 void mem_cgroup_uncharge_list(struct list_head *page_list)
 {
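
One non-obvious detail above is Arnd Bergmann's folded-in fixup: with the try/commit/cancel entry points gone, cancel_charge() is referenced only from code that is itself compiled conditionally (the kmem accounting and MMU-only paths), so on configurations with neither it would trip gcc's -Wunused-function. Hence the new #if defined(CONFIG_MEMCG_KMEM) || defined(CONFIG_MMU) guard. A minimal stand-alone reproduction of that failure mode, using hypothetical names (helper(), foo_path(), CONFIG_FOO):

    /* Build with: gcc -Wunused-function -c example.c */

    static void helper(void)	/* warns when CONFIG_FOO is undefined... */
    {
    }

    #ifdef CONFIG_FOO		/* ...because its only caller is conditional */
    void foo_path(void)
    {
            helper();
    }
    #endif

    /*
     * The fix mirrors the patch above: compile the helper under the
     * same condition as its remaining callers:
     *
     *	#ifdef CONFIG_FOO
     *	static void helper(void) { }
     *	#endif
     */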