Commit bbec2e15 authored by Roman Gushchin, committed by Linus Torvalds

mm: rename page_counter's count/limit into usage/max

This patch renames struct page_counter fields:
  count -> usage
  limit -> max

and the corresponding functions:
  page_counter_limit() -> page_counter_set_max()
  mem_cgroup_get_limit() -> mem_cgroup_get_max()
  mem_cgroup_resize_limit() -> mem_cgroup_resize_max()
  memcg_update_kmem_limit() -> memcg_update_kmem_max()
  memcg_update_tcp_limit() -> memcg_update_tcp_max()

The idea behind this renaming is to have a direct match between the
memory cgroup knobs (low, high, max) and the page_counter API.

This is a pure renaming; the patch doesn't bring any functional change.

Link: http://lkml.kernel.org/r/20180405185921.4942-1-guro@fb.com
Signed-off-by: Roman Gushchin <guro@fb.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Vladimir Davydov <vdavydov.dev@gmail.com>
Cc: Tejun Heo <tj@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 1c4bc43d
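
To make the mapping concrete before the diff, a minimal sketch of a caller
using the renamed API; the demo_* names are hypothetical, and only the
page_counter calls and fields come from this patch:

    #include <linux/page_counter.h>
    #include <linux/printk.h>

    static struct page_counter demo_counter;    /* hypothetical counter */

    static int demo_set_limit(unsigned long nr_pages)
    {
            int ret;

            /* Before this patch: page_counter_limit(&demo_counter, nr_pages). */
            ret = page_counter_set_max(&demo_counter, nr_pages);
            if (ret)
                    return ret;     /* -EBUSY: usage already above the new max */

            /* Before this patch: counter->count and counter->limit. */
            pr_info("usage: %lu max: %lu\n",
                    page_counter_read(&demo_counter),
                    READ_ONCE(demo_counter.max));
            return 0;
    }
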
@@ -467,7 +467,7 @@ unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
 
 void mem_cgroup_handle_over_high(void);
 
-unsigned long mem_cgroup_get_limit(struct mem_cgroup *memcg);
+unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg);
 
 void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
                                struct task_struct *p);
@@ -858,7 +858,7 @@ mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
         return 0;
 }
 
-static inline unsigned long mem_cgroup_get_limit(struct mem_cgroup *memcg)
+static inline unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
 {
         return 0;
 }
...
@@ -7,8 +7,8 @@
 #include <asm/page.h>
 
 struct page_counter {
-        atomic_long_t count;
-        unsigned long limit;
+        atomic_long_t usage;
+        unsigned long max;
         struct page_counter *parent;
 
         /* legacy */
@@ -25,14 +25,14 @@ struct page_counter {
 static inline void page_counter_init(struct page_counter *counter,
                                      struct page_counter *parent)
 {
-        atomic_long_set(&counter->count, 0);
-        counter->limit = PAGE_COUNTER_MAX;
+        atomic_long_set(&counter->usage, 0);
+        counter->max = PAGE_COUNTER_MAX;
         counter->parent = parent;
 }
 
 static inline unsigned long page_counter_read(struct page_counter *counter)
 {
-        return atomic_long_read(&counter->count);
+        return atomic_long_read(&counter->usage);
 }
 
 void page_counter_cancel(struct page_counter *counter, unsigned long nr_pages);
@@ -41,7 +41,7 @@ bool page_counter_try_charge(struct page_counter *counter,
                             unsigned long nr_pages,
                             struct page_counter **fail);
 void page_counter_uncharge(struct page_counter *counter, unsigned long nr_pages);
-int page_counter_limit(struct page_counter *counter, unsigned long limit);
+int page_counter_set_max(struct page_counter *counter, unsigned long nr_pages);
 int page_counter_memparse(const char *buf, const char *max,
                           unsigned long *nr_pages);
...
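
For context, a hedged sketch of how this header's API fits together after the
rename; the demo_* counters and function are illustrative, not part of the
patch:

    #include <linux/page_counter.h>

    static struct page_counter demo_parent, demo_child;

    static bool demo_charge(unsigned long nr_pages)
    {
            struct page_counter *fail;

            page_counter_init(&demo_parent, NULL);          /* usage = 0, max = PAGE_COUNTER_MAX */
            page_counter_init(&demo_child, &demo_parent);   /* charges propagate to the parent */

            /* Fails if the child or any ancestor would exceed its max. */
            if (!page_counter_try_charge(&demo_child, nr_pages, &fail))
                    return false;   /* *fail points at the counter that hit its max */

            page_counter_uncharge(&demo_child, nr_pages);
            return true;
    }
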
@@ -84,7 +84,7 @@ static void hugetlb_cgroup_init(struct hugetlb_cgroup *h_cgroup,
                 limit = round_down(PAGE_COUNTER_MAX,
                                    1 << huge_page_order(&hstates[idx]));
-                ret = page_counter_limit(counter, limit);
+                ret = page_counter_set_max(counter, limit);
                 VM_BUG_ON(ret);
         }
 }
@@ -273,7 +273,7 @@ static u64 hugetlb_cgroup_read_u64(struct cgroup_subsys_state *css,
         case RES_USAGE:
                 return (u64)page_counter_read(counter) * PAGE_SIZE;
         case RES_LIMIT:
-                return (u64)counter->limit * PAGE_SIZE;
+                return (u64)counter->max * PAGE_SIZE;
         case RES_MAX_USAGE:
                 return (u64)counter->watermark * PAGE_SIZE;
         case RES_FAILCNT:
@@ -306,7 +306,7 @@ static ssize_t hugetlb_cgroup_write(struct kernfs_open_file *of,
         switch (MEMFILE_ATTR(of_cft(of)->private)) {
         case RES_LIMIT:
                 mutex_lock(&hugetlb_limit_mutex);
-                ret = page_counter_limit(&h_cg->hugepage[idx], nr_pages);
+                ret = page_counter_set_max(&h_cg->hugepage[idx], nr_pages);
                 mutex_unlock(&hugetlb_limit_mutex);
                 break;
         default:
...
@@ -1034,13 +1034,13 @@ static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
         unsigned long limit;
 
         count = page_counter_read(&memcg->memory);
-        limit = READ_ONCE(memcg->memory.limit);
+        limit = READ_ONCE(memcg->memory.max);
         if (count < limit)
                 margin = limit - count;
 
         if (do_memsw_account()) {
                 count = page_counter_read(&memcg->memsw);
-                limit = READ_ONCE(memcg->memsw.limit);
+                limit = READ_ONCE(memcg->memsw.max);
                 if (count <= limit)
                         margin = min(margin, limit - count);
                 else
@@ -1148,13 +1148,13 @@ void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
         pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n",
                 K((u64)page_counter_read(&memcg->memory)),
-                K((u64)memcg->memory.limit), memcg->memory.failcnt);
+                K((u64)memcg->memory.max), memcg->memory.failcnt);
         pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %lu\n",
                 K((u64)page_counter_read(&memcg->memsw)),
-                K((u64)memcg->memsw.limit), memcg->memsw.failcnt);
+                K((u64)memcg->memsw.max), memcg->memsw.failcnt);
         pr_info("kmem: usage %llukB, limit %llukB, failcnt %lu\n",
                 K((u64)page_counter_read(&memcg->kmem)),
-                K((u64)memcg->kmem.limit), memcg->kmem.failcnt);
+                K((u64)memcg->kmem.max), memcg->kmem.failcnt);
 
         for_each_mem_cgroup_tree(iter, memcg) {
                 pr_info("Memory cgroup stats for ");
@@ -1179,21 +1179,21 @@ void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
 /*
  * Return the memory (and swap, if configured) limit for a memcg.
  */
-unsigned long mem_cgroup_get_limit(struct mem_cgroup *memcg)
+unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
 {
-        unsigned long limit;
+        unsigned long max;
 
-        limit = memcg->memory.limit;
+        max = memcg->memory.max;
         if (mem_cgroup_swappiness(memcg)) {
-                unsigned long memsw_limit;
-                unsigned long swap_limit;
+                unsigned long memsw_max;
+                unsigned long swap_max;
 
-                memsw_limit = memcg->memsw.limit;
-                swap_limit = memcg->swap.limit;
-                swap_limit = min(swap_limit, (unsigned long)total_swap_pages);
-                limit = min(limit + swap_limit, memsw_limit);
+                memsw_max = memcg->memsw.max;
+                swap_max = memcg->swap.max;
+                swap_max = min(swap_max, (unsigned long)total_swap_pages);
+                max = min(max + swap_max, memsw_max);
         }
-        return limit;
+        return max;
 }
 
 static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
@@ -2444,10 +2444,10 @@ static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
 }
 #endif
 
-static DEFINE_MUTEX(memcg_limit_mutex);
+static DEFINE_MUTEX(memcg_max_mutex);
 
-static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
-                                   unsigned long limit, bool memsw)
+static int mem_cgroup_resize_max(struct mem_cgroup *memcg,
+                                 unsigned long max, bool memsw)
 {
         bool enlarge = false;
         int ret;
@@ -2460,22 +2460,22 @@ static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
                         break;
                 }
 
-                mutex_lock(&memcg_limit_mutex);
+                mutex_lock(&memcg_max_mutex);
                 /*
                  * Make sure that the new limit (memsw or memory limit) doesn't
-                 * break our basic invariant rule memory.limit <= memsw.limit.
+                 * break our basic invariant rule memory.max <= memsw.max.
                  */
-                limits_invariant = memsw ? limit >= memcg->memory.limit :
-                                           limit <= memcg->memsw.limit;
+                limits_invariant = memsw ? max >= memcg->memory.max :
+                                           max <= memcg->memsw.max;
                 if (!limits_invariant) {
-                        mutex_unlock(&memcg_limit_mutex);
+                        mutex_unlock(&memcg_max_mutex);
                         ret = -EINVAL;
                         break;
                 }
-                if (limit > counter->limit)
+                if (max > counter->max)
                         enlarge = true;
-                ret = page_counter_limit(counter, limit);
-                mutex_unlock(&memcg_limit_mutex);
+                ret = page_counter_set_max(counter, max);
+                mutex_unlock(&memcg_max_mutex);
 
                 if (!ret)
                         break;
@@ -2757,7 +2757,7 @@ static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css,
                         return (u64)mem_cgroup_usage(memcg, true) * PAGE_SIZE;
                 return (u64)page_counter_read(counter) * PAGE_SIZE;
         case RES_LIMIT:
-                return (u64)counter->limit * PAGE_SIZE;
+                return (u64)counter->max * PAGE_SIZE;
         case RES_MAX_USAGE:
                 return (u64)counter->watermark * PAGE_SIZE;
         case RES_FAILCNT:
@@ -2871,24 +2871,24 @@ static void memcg_free_kmem(struct mem_cgroup *memcg)
 }
 #endif /* !CONFIG_SLOB */
 
-static int memcg_update_kmem_limit(struct mem_cgroup *memcg,
-                                   unsigned long limit)
+static int memcg_update_kmem_max(struct mem_cgroup *memcg,
+                                 unsigned long max)
 {
         int ret;
 
-        mutex_lock(&memcg_limit_mutex);
-        ret = page_counter_limit(&memcg->kmem, limit);
-        mutex_unlock(&memcg_limit_mutex);
+        mutex_lock(&memcg_max_mutex);
+        ret = page_counter_set_max(&memcg->kmem, max);
+        mutex_unlock(&memcg_max_mutex);
         return ret;
 }
 
-static int memcg_update_tcp_limit(struct mem_cgroup *memcg, unsigned long limit)
+static int memcg_update_tcp_max(struct mem_cgroup *memcg, unsigned long max)
 {
         int ret;
 
-        mutex_lock(&memcg_limit_mutex);
+        mutex_lock(&memcg_max_mutex);
 
-        ret = page_counter_limit(&memcg->tcpmem, limit);
+        ret = page_counter_set_max(&memcg->tcpmem, max);
         if (ret)
                 goto out;
@@ -2913,7 +2913,7 @@ static int memcg_update_tcp_limit(struct mem_cgroup *memcg, unsigned long limit)
                 memcg->tcpmem_active = true;
         }
 out:
-        mutex_unlock(&memcg_limit_mutex);
+        mutex_unlock(&memcg_max_mutex);
         return ret;
 }
@@ -2941,16 +2941,16 @@ static ssize_t mem_cgroup_write(struct kernfs_open_file *of,
         }
 
         switch (MEMFILE_TYPE(of_cft(of)->private)) {
         case _MEM:
-                ret = mem_cgroup_resize_limit(memcg, nr_pages, false);
+                ret = mem_cgroup_resize_max(memcg, nr_pages, false);
                 break;
         case _MEMSWAP:
-                ret = mem_cgroup_resize_limit(memcg, nr_pages, true);
+                ret = mem_cgroup_resize_max(memcg, nr_pages, true);
                 break;
         case _KMEM:
-                ret = memcg_update_kmem_limit(memcg, nr_pages);
+                ret = memcg_update_kmem_max(memcg, nr_pages);
                 break;
         case _TCP:
-                ret = memcg_update_tcp_limit(memcg, nr_pages);
+                ret = memcg_update_tcp_max(memcg, nr_pages);
                 break;
         }
         break;
@@ -3126,8 +3126,8 @@ static int memcg_stat_show(struct seq_file *m, void *v)
         /* Hierarchical information */
         memory = memsw = PAGE_COUNTER_MAX;
         for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) {
-                memory = min(memory, mi->memory.limit);
-                memsw = min(memsw, mi->memsw.limit);
+                memory = min(memory, mi->memory.max);
+                memsw = min(memsw, mi->memsw.max);
         }
         seq_printf(m, "hierarchical_memory_limit %llu\n",
                    (u64)memory * PAGE_SIZE);
@@ -3626,7 +3626,7 @@ void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
         *pheadroom = PAGE_COUNTER_MAX;
 
         while ((parent = parent_mem_cgroup(memcg))) {
-                unsigned long ceiling = min(memcg->memory.limit, memcg->high);
+                unsigned long ceiling = min(memcg->memory.max, memcg->high);
                 unsigned long used = page_counter_read(&memcg->memory);
 
                 *pheadroom = min(*pheadroom, ceiling - min(ceiling, used));
@@ -4319,12 +4319,12 @@ static void mem_cgroup_css_reset(struct cgroup_subsys_state *css)
 {
         struct mem_cgroup *memcg = mem_cgroup_from_css(css);
 
-        page_counter_limit(&memcg->memory, PAGE_COUNTER_MAX);
-        page_counter_limit(&memcg->swap, PAGE_COUNTER_MAX);
-        page_counter_limit(&memcg->memsw, PAGE_COUNTER_MAX);
-        page_counter_limit(&memcg->kmem, PAGE_COUNTER_MAX);
-        page_counter_limit(&memcg->tcpmem, PAGE_COUNTER_MAX);
+        page_counter_set_max(&memcg->memory, PAGE_COUNTER_MAX);
+        page_counter_set_max(&memcg->swap, PAGE_COUNTER_MAX);
+        page_counter_set_max(&memcg->memsw, PAGE_COUNTER_MAX);
+        page_counter_set_max(&memcg->kmem, PAGE_COUNTER_MAX);
+        page_counter_set_max(&memcg->tcpmem, PAGE_COUNTER_MAX);
         memcg->low = 0;
         memcg->high = PAGE_COUNTER_MAX;
         memcg->soft_limit = PAGE_COUNTER_MAX;
         memcg_wb_domain_size_changed(memcg);
@@ -5131,7 +5131,7 @@ static ssize_t memory_high_write(struct kernfs_open_file *of,
 static int memory_max_show(struct seq_file *m, void *v)
 {
         struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
-        unsigned long max = READ_ONCE(memcg->memory.limit);
+        unsigned long max = READ_ONCE(memcg->memory.max);
 
         if (max == PAGE_COUNTER_MAX)
                 seq_puts(m, "max\n");
@@ -5155,7 +5155,7 @@ static ssize_t memory_max_write(struct kernfs_open_file *of,
         if (err)
                 return err;
 
-        xchg(&memcg->memory.limit, max);
+        xchg(&memcg->memory.max, max);
 
         for (;;) {
                 unsigned long nr_pages = page_counter_read(&memcg->memory);
@@ -6074,7 +6074,7 @@ long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
                 return nr_swap_pages;
         for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg))
                 nr_swap_pages = min_t(long, nr_swap_pages,
-                                      READ_ONCE(memcg->swap.limit) -
+                                      READ_ONCE(memcg->swap.max) -
                                       page_counter_read(&memcg->swap));
 
         return nr_swap_pages;
 }
@@ -6095,7 +6095,7 @@ bool mem_cgroup_swap_full(struct page *page)
                 return false;
 
         for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg))
-                if (page_counter_read(&memcg->swap) * 2 >= memcg->swap.limit)
+                if (page_counter_read(&memcg->swap) * 2 >= memcg->swap.max)
                         return true;
 
         return false;
@@ -6129,7 +6129,7 @@ static u64 swap_current_read(struct cgroup_subsys_state *css,
 static int swap_max_show(struct seq_file *m, void *v)
 {
         struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
-        unsigned long max = READ_ONCE(memcg->swap.limit);
+        unsigned long max = READ_ONCE(memcg->swap.max);
 
         if (max == PAGE_COUNTER_MAX)
                 seq_puts(m, "max\n");
@@ -6151,9 +6151,9 @@ static ssize_t swap_max_write(struct kernfs_open_file *of,
         if (err)
                 return err;
 
-        mutex_lock(&memcg_limit_mutex);
-        err = page_counter_limit(&memcg->swap, max);
-        mutex_unlock(&memcg_limit_mutex);
+        mutex_lock(&memcg_max_mutex);
+        err = page_counter_set_max(&memcg->swap, max);
+        mutex_unlock(&memcg_max_mutex);
 
         if (err)
                 return err;
...
@@ -256,7 +256,7 @@ static enum oom_constraint constrained_alloc(struct oom_control *oc)
         int nid;
 
         if (is_memcg_oom(oc)) {
-                oc->totalpages = mem_cgroup_get_limit(oc->memcg) ?: 1;
+                oc->totalpages = mem_cgroup_get_max(oc->memcg) ?: 1;
                 return CONSTRAINT_MEMCG;
         }
...
@@ -22,7 +22,7 @@ void page_counter_cancel(struct page_counter *counter, unsigned long nr_pages)
 {
         long new;
 
-        new = atomic_long_sub_return(nr_pages, &counter->count);
+        new = atomic_long_sub_return(nr_pages, &counter->usage);
         /* More uncharges than charges? */
         WARN_ON_ONCE(new < 0);
 }
@@ -41,7 +41,7 @@ void page_counter_charge(struct page_counter *counter, unsigned long nr_pages)
         for (c = counter; c; c = c->parent) {
                 long new;
 
-                new = atomic_long_add_return(nr_pages, &c->count);
+                new = atomic_long_add_return(nr_pages, &c->usage);
                 /*
                  * This is indeed racy, but we can live with some
                  * inaccuracy in the watermark.
@@ -82,9 +82,9 @@ bool page_counter_try_charge(struct page_counter *counter,
                  * we either see the new limit or the setter sees the
                  * counter has changed and retries.
                  */
-                new = atomic_long_add_return(nr_pages, &c->count);
-                if (new > c->limit) {
-                        atomic_long_sub(nr_pages, &c->count);
+                new = atomic_long_add_return(nr_pages, &c->usage);
+                if (new > c->max) {
+                        atomic_long_sub(nr_pages, &c->usage);
                         /*
                          * This is racy, but we can live with some
                          * inaccuracy in the failcnt.
@@ -123,20 +123,20 @@ void page_counter_uncharge(struct page_counter *counter, unsigned long nr_pages)
 }
 
 /**
- * page_counter_limit - limit the number of pages allowed
+ * page_counter_set_max - set the maximum number of pages allowed
  * @counter: counter
- * @limit: limit to set
+ * @nr_pages: limit to set
  *
  * Returns 0 on success, -EBUSY if the current number of pages on the
  * counter already exceeds the specified limit.
  *
 * The caller must serialize invocations on the same counter.
 */
-int page_counter_limit(struct page_counter *counter, unsigned long limit)
+int page_counter_set_max(struct page_counter *counter, unsigned long nr_pages)
 {
         for (;;) {
                 unsigned long old;
-                long count;
+                long usage;
 
                 /*
                  * Update the limit while making sure that it's not
@@ -149,17 +149,17 @@ int page_counter_limit(struct page_counter *counter, unsigned long limit)
                  * the limit, so if it sees the old limit, we see the
                  * modified counter and retry.
                  */
-                count = atomic_long_read(&counter->count);
+                usage = atomic_long_read(&counter->usage);
 
-                if (count > limit)
+                if (usage > nr_pages)
                         return -EBUSY;
 
-                old = xchg(&counter->limit, limit);
+                old = xchg(&counter->max, nr_pages);
 
-                if (atomic_long_read(&counter->count) <= count)
+                if (atomic_long_read(&counter->usage) <= usage)
                         return 0;
 
-                counter->limit = old;
+                counter->max = old;
                 cond_resched();
         }
 }
...
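
As the kerneldoc above notes, page_counter_set_max() retries on its own
against concurrent charges, but concurrent setters must be serialized by the
caller. A hedged sketch of that caller-side pattern, mirroring how
memcg_max_mutex is used in the memcontrol.c hunks above (demo_* names are
illustrative):

    #include <linux/mutex.h>
    #include <linux/page_counter.h>

    static DEFINE_MUTEX(demo_max_mutex);    /* plays the role of memcg_max_mutex */

    static int demo_update_max(struct page_counter *counter, unsigned long max)
    {
            int ret;

            /* Exclude other setters; charges may still race and are retried. */
            mutex_lock(&demo_max_mutex);
            ret = page_counter_set_max(counter, max);
            mutex_unlock(&demo_max_mutex);
            return ret;
    }
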