Commit bec78efd authored by Wei Yang, committed by Linus Torvalds

mm/memcg: remove unused definitions

Some definitions are left unused; just clean them up.
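
For context, each removed page wrapper only resolved the page's memcg and forwarded to the memcg-based accessor that stays in the header, so callers lose nothing they could not open-code. A minimal sketch of that pattern, lifted from the removed mod_memcg_page_state() body (the function name below is made up for illustration; it relies on the page_memcg() and mod_memcg_state() helpers already present under CONFIG_MEMCG):

  /* Illustrative only: what the removed mod_memcg_page_state() did inline. */
  static inline void example_mod_page_state(struct page *page, int idx, int val)
  {
  	struct mem_cgroup *memcg = page_memcg(page);	/* NULL if the page is not charged */

  	if (memcg)
  		mod_memcg_state(memcg, idx, val);	/* surviving memcg-based accessor */
  }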

Link: https://lkml.kernel.org/r/20201108003834.12669-1-richard.weiyang@gmail.com
Signed-off-by: Wei Yang <richard.weiyang@gmail.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Reviewed-by: Shakeel Butt <shakeelb@google.com>
Reviewed-by: Roman Gushchin <guro@fb.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent a4055888
@@ -913,41 +913,6 @@ static inline void mod_memcg_state(struct mem_cgroup *memcg,
 	local_irq_restore(flags);
 }
 
-/**
- * mod_memcg_page_state - update page state statistics
- * @page: the page
- * @idx: page state item to account
- * @val: number of pages (positive or negative)
- *
- * The @page must be locked or the caller must use lock_page_memcg()
- * to prevent double accounting when the page is concurrently being
- * moved to another memcg:
- *
- *   lock_page(page) or lock_page_memcg(page)
- *   if (TestClearPageState(page))
- *     mod_memcg_page_state(page, state, -1);
- *   unlock_page(page) or unlock_page_memcg(page)
- *
- * Kernel pages are an exception to this, since they'll never move.
- */
-static inline void __mod_memcg_page_state(struct page *page,
-					  int idx, int val)
-{
-	struct mem_cgroup *memcg = page_memcg(page);
-
-	if (memcg)
-		__mod_memcg_state(memcg, idx, val);
-}
-
-static inline void mod_memcg_page_state(struct page *page,
-					int idx, int val)
-{
-	struct mem_cgroup *memcg = page_memcg(page);
-
-	if (memcg)
-		mod_memcg_state(memcg, idx, val);
-}
-
 static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
 					      enum node_stat_item idx)
 {
@@ -1395,18 +1360,6 @@ static inline void mod_memcg_state(struct mem_cgroup *memcg,
 {
 }
 
-static inline void __mod_memcg_page_state(struct page *page,
-					  int idx,
-					  int nr)
-{
-}
-
-static inline void mod_memcg_page_state(struct page *page,
-					int idx,
-					int nr)
-{
-}
-
 static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
 					      enum node_stat_item idx)
 {
@@ -1479,34 +1432,6 @@ static inline void lruvec_memcg_debug(struct lruvec *lruvec, struct page *page)
 }
 #endif /* CONFIG_MEMCG */
 
-/* idx can be of type enum memcg_stat_item or node_stat_item */
-static inline void __inc_memcg_state(struct mem_cgroup *memcg,
-				     int idx)
-{
-	__mod_memcg_state(memcg, idx, 1);
-}
-
-/* idx can be of type enum memcg_stat_item or node_stat_item */
-static inline void __dec_memcg_state(struct mem_cgroup *memcg,
-				     int idx)
-{
-	__mod_memcg_state(memcg, idx, -1);
-}
-
-/* idx can be of type enum memcg_stat_item or node_stat_item */
-static inline void __inc_memcg_page_state(struct page *page,
-					  int idx)
-{
-	__mod_memcg_page_state(page, idx, 1);
-}
-
-/* idx can be of type enum memcg_stat_item or node_stat_item */
-static inline void __dec_memcg_page_state(struct page *page,
-					  int idx)
-{
-	__mod_memcg_page_state(page, idx, -1);
-}
-
 static inline void __inc_lruvec_kmem_state(void *p, enum node_stat_item idx)
 {
 	__mod_lruvec_kmem_state(p, idx, 1);
@@ -1517,34 +1442,6 @@ static inline void __dec_lruvec_kmem_state(void *p, enum node_stat_item idx)
 	__mod_lruvec_kmem_state(p, idx, -1);
 }
 
-/* idx can be of type enum memcg_stat_item or node_stat_item */
-static inline void inc_memcg_state(struct mem_cgroup *memcg,
-				   int idx)
-{
-	mod_memcg_state(memcg, idx, 1);
-}
-
-/* idx can be of type enum memcg_stat_item or node_stat_item */
-static inline void dec_memcg_state(struct mem_cgroup *memcg,
-				   int idx)
-{
-	mod_memcg_state(memcg, idx, -1);
-}
-
-/* idx can be of type enum memcg_stat_item or node_stat_item */
-static inline void inc_memcg_page_state(struct page *page,
-					int idx)
-{
-	mod_memcg_page_state(page, idx, 1);
-}
-
-/* idx can be of type enum memcg_stat_item or node_stat_item */
-static inline void dec_memcg_page_state(struct page *page,
-					int idx)
-{
-	mod_memcg_page_state(page, idx, -1);
-}
-
 static inline struct lruvec *parent_lruvec(struct lruvec *lruvec)
 {
 	struct mem_cgroup *memcg;
@@ -1733,21 +1630,6 @@ static inline void memcg_kmem_uncharge_page(struct page *page, int order)
 		__memcg_kmem_uncharge_page(page, order);
 }
 
-static inline int memcg_kmem_charge(struct mem_cgroup *memcg, gfp_t gfp,
-				    unsigned int nr_pages)
-{
-	if (memcg_kmem_enabled())
-		return __memcg_kmem_charge(memcg, gfp, nr_pages);
-	return 0;
-}
-
-static inline void memcg_kmem_uncharge(struct mem_cgroup *memcg,
-				       unsigned int nr_pages)
-{
-	if (memcg_kmem_enabled())
-		__memcg_kmem_uncharge(memcg, nr_pages);
-}
-
 /*
  * A helper for accessing memcg's kmem_id, used for getting
  * corresponding LRU lists.