Commit 2c488db2 authored by Kirill A. Shutemov's avatar Kirill A. Shutemov Committed by Linus Torvalds

memcg: clean up memory thresholds

Introduce struct mem_cgroup_thresholds.  It helps to reduce number of
checks of thresholds type (memory or mem+swap).

[akpm@linux-foundation.org: repair comment]
Signed-off-by: Kirill A. Shutemov <kirill@shutemov.name>
Cc: Phil Carmody <ext-phil.2.carmody@nokia.com>
Cc: Balbir Singh <balbir@linux.vnet.ibm.com>
Cc: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Acked-by: Paul Menage <menage@google.com>
Cc: Li Zefan <lizf@cn.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 907860ed
...@@ -158,6 +158,18 @@ struct mem_cgroup_threshold_ary { ...@@ -158,6 +158,18 @@ struct mem_cgroup_threshold_ary {
/* Array of thresholds */ /* Array of thresholds */
struct mem_cgroup_threshold entries[0]; struct mem_cgroup_threshold entries[0];
}; };
struct mem_cgroup_thresholds {
/* Primary thresholds array */
struct mem_cgroup_threshold_ary *primary;
/*
* Spare threshold array.
* This is needed to make mem_cgroup_unregister_event() "never fail".
* It must be able to store at least primary->size - 1 entries.
*/
struct mem_cgroup_threshold_ary *spare;
};
/* for OOM */ /* for OOM */
struct mem_cgroup_eventfd_list { struct mem_cgroup_eventfd_list {
struct list_head list; struct list_head list;
...@@ -224,20 +236,10 @@ struct mem_cgroup { ...@@ -224,20 +236,10 @@ struct mem_cgroup {
struct mutex thresholds_lock; struct mutex thresholds_lock;
/* thresholds for memory usage. RCU-protected */ /* thresholds for memory usage. RCU-protected */
struct mem_cgroup_threshold_ary *thresholds; struct mem_cgroup_thresholds thresholds;
/*
* Preallocated buffer to be used in mem_cgroup_unregister_event()
* to make it "never fail".
* It must be able to store at least thresholds->size - 1 entries.
*/
struct mem_cgroup_threshold_ary *__thresholds;
/* thresholds for mem+swap usage. RCU-protected */ /* thresholds for mem+swap usage. RCU-protected */
struct mem_cgroup_threshold_ary *memsw_thresholds; struct mem_cgroup_thresholds memsw_thresholds;
/* the same as __thresholds, but for memsw_thresholds */
struct mem_cgroup_threshold_ary *__memsw_thresholds;
/* For oom notifier event fd */ /* For oom notifier event fd */
struct list_head oom_notify; struct list_head oom_notify;
...@@ -3467,9 +3469,9 @@ static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap) ...@@ -3467,9 +3469,9 @@ static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
rcu_read_lock(); rcu_read_lock();
if (!swap) if (!swap)
t = rcu_dereference(memcg->thresholds); t = rcu_dereference(memcg->thresholds.primary);
else else
t = rcu_dereference(memcg->memsw_thresholds); t = rcu_dereference(memcg->memsw_thresholds.primary);
if (!t) if (!t)
goto unlock; goto unlock;
...@@ -3543,91 +3545,78 @@ static int mem_cgroup_usage_register_event(struct cgroup *cgrp, ...@@ -3543,91 +3545,78 @@ static int mem_cgroup_usage_register_event(struct cgroup *cgrp,
struct cftype *cft, struct eventfd_ctx *eventfd, const char *args) struct cftype *cft, struct eventfd_ctx *eventfd, const char *args)
{ {
struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp); struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
struct mem_cgroup_threshold_ary *thresholds, *thresholds_new; struct mem_cgroup_thresholds *thresholds;
struct mem_cgroup_threshold_ary *new;
int type = MEMFILE_TYPE(cft->private); int type = MEMFILE_TYPE(cft->private);
u64 threshold, usage; u64 threshold, usage;
int size; int i, size, ret;
int i, ret;
ret = res_counter_memparse_write_strategy(args, &threshold); ret = res_counter_memparse_write_strategy(args, &threshold);
if (ret) if (ret)
return ret; return ret;
mutex_lock(&memcg->thresholds_lock); mutex_lock(&memcg->thresholds_lock);
if (type == _MEM) if (type == _MEM)
thresholds = memcg->thresholds; thresholds = &memcg->thresholds;
else if (type == _MEMSWAP) else if (type == _MEMSWAP)
thresholds = memcg->memsw_thresholds; thresholds = &memcg->memsw_thresholds;
else else
BUG(); BUG();
usage = mem_cgroup_usage(memcg, type == _MEMSWAP); usage = mem_cgroup_usage(memcg, type == _MEMSWAP);
/* Check if a threshold crossed before adding a new one */ /* Check if a threshold crossed before adding a new one */
if (thresholds) if (thresholds->primary)
__mem_cgroup_threshold(memcg, type == _MEMSWAP); __mem_cgroup_threshold(memcg, type == _MEMSWAP);
if (thresholds) size = thresholds->primary ? thresholds->primary->size + 1 : 1;
size = thresholds->size + 1;
else
size = 1;
/* Allocate memory for new array of thresholds */ /* Allocate memory for new array of thresholds */
thresholds_new = kmalloc(sizeof(*thresholds_new) + new = kmalloc(sizeof(*new) + size * sizeof(struct mem_cgroup_threshold),
size * sizeof(struct mem_cgroup_threshold),
GFP_KERNEL); GFP_KERNEL);
if (!thresholds_new) { if (!new) {
ret = -ENOMEM; ret = -ENOMEM;
goto unlock; goto unlock;
} }
thresholds_new->size = size; new->size = size;
/* Copy thresholds (if any) to new array */ /* Copy thresholds (if any) to new array */
if (thresholds) if (thresholds->primary) {
memcpy(thresholds_new->entries, thresholds->entries, memcpy(new->entries, thresholds->primary->entries, (size - 1) *
thresholds->size *
sizeof(struct mem_cgroup_threshold)); sizeof(struct mem_cgroup_threshold));
}
/* Add new threshold */ /* Add new threshold */
thresholds_new->entries[size - 1].eventfd = eventfd; new->entries[size - 1].eventfd = eventfd;
thresholds_new->entries[size - 1].threshold = threshold; new->entries[size - 1].threshold = threshold;
/* Sort thresholds. Registering of new threshold isn't time-critical */ /* Sort thresholds. Registering of new threshold isn't time-critical */
sort(thresholds_new->entries, size, sort(new->entries, size, sizeof(struct mem_cgroup_threshold),
sizeof(struct mem_cgroup_threshold),
compare_thresholds, NULL); compare_thresholds, NULL);
/* Find current threshold */ /* Find current threshold */
thresholds_new->current_threshold = -1; new->current_threshold = -1;
for (i = 0; i < size; i++) { for (i = 0; i < size; i++) {
if (thresholds_new->entries[i].threshold < usage) { if (new->entries[i].threshold < usage) {
/* /*
* thresholds_new->current_threshold will not be used * new->current_threshold will not be used until
* until rcu_assign_pointer(), so it's safe to increment * rcu_assign_pointer(), so it's safe to increment
* it here. * it here.
*/ */
++thresholds_new->current_threshold; ++new->current_threshold;
} }
} }
if (type == _MEM) /* Free old spare buffer and save old primary buffer as spare */
rcu_assign_pointer(memcg->thresholds, thresholds_new); kfree(thresholds->spare);
else thresholds->spare = thresholds->primary;
rcu_assign_pointer(memcg->memsw_thresholds, thresholds_new);
rcu_assign_pointer(thresholds->primary, new);
/* To be sure that nobody uses thresholds */ /* To be sure that nobody uses thresholds */
synchronize_rcu(); synchronize_rcu();
/*
* Free old preallocated buffer and use thresholds as new
* preallocated buffer.
*/
if (type == _MEM) {
kfree(memcg->__thresholds);
memcg->__thresholds = thresholds;
} else {
kfree(memcg->__memsw_thresholds);
memcg->__memsw_thresholds = thresholds;
}
unlock: unlock:
mutex_unlock(&memcg->thresholds_lock); mutex_unlock(&memcg->thresholds_lock);
...@@ -3638,17 +3627,17 @@ static void mem_cgroup_usage_unregister_event(struct cgroup *cgrp, ...@@ -3638,17 +3627,17 @@ static void mem_cgroup_usage_unregister_event(struct cgroup *cgrp,
struct cftype *cft, struct eventfd_ctx *eventfd) struct cftype *cft, struct eventfd_ctx *eventfd)
{ {
struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp); struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
struct mem_cgroup_threshold_ary *thresholds, *thresholds_new; struct mem_cgroup_thresholds *thresholds;
struct mem_cgroup_threshold_ary *new;
int type = MEMFILE_TYPE(cft->private); int type = MEMFILE_TYPE(cft->private);
u64 usage; u64 usage;
int size = 0; int i, j, size;
int i, j;
mutex_lock(&memcg->thresholds_lock); mutex_lock(&memcg->thresholds_lock);
if (type == _MEM) if (type == _MEM)
thresholds = memcg->thresholds; thresholds = &memcg->thresholds;
else if (type == _MEMSWAP) else if (type == _MEMSWAP)
thresholds = memcg->memsw_thresholds; thresholds = &memcg->memsw_thresholds;
else else
BUG(); BUG();
...@@ -3664,53 +3653,45 @@ static void mem_cgroup_usage_unregister_event(struct cgroup *cgrp, ...@@ -3664,53 +3653,45 @@ static void mem_cgroup_usage_unregister_event(struct cgroup *cgrp,
__mem_cgroup_threshold(memcg, type == _MEMSWAP); __mem_cgroup_threshold(memcg, type == _MEMSWAP);
/* Calculate new number of threshold */ /* Calculate new number of threshold */
for (i = 0; i < thresholds->size; i++) { size = 0;
if (thresholds->entries[i].eventfd != eventfd) for (i = 0; i < thresholds->primary->size; i++) {
if (thresholds->primary->entries[i].eventfd != eventfd)
size++; size++;
} }
/* Use preallocated buffer for new array of thresholds */ new = thresholds->spare;
if (type == _MEM)
thresholds_new = memcg->__thresholds;
else
thresholds_new = memcg->__memsw_thresholds;
/* Set thresholds array to NULL if we don't have thresholds */ /* Set thresholds array to NULL if we don't have thresholds */
if (!size) { if (!size) {
kfree(thresholds_new); kfree(new);
thresholds_new = NULL; new = NULL;
goto swap_buffers; goto swap_buffers;
} }
thresholds_new->size = size; new->size = size;
/* Copy thresholds and find current threshold */ /* Copy thresholds and find current threshold */
thresholds_new->current_threshold = -1; new->current_threshold = -1;
for (i = 0, j = 0; i < thresholds->size; i++) { for (i = 0, j = 0; i < thresholds->primary->size; i++) {
if (thresholds->entries[i].eventfd == eventfd) if (thresholds->primary->entries[i].eventfd == eventfd)
continue; continue;
thresholds_new->entries[j] = thresholds->entries[i]; new->entries[j] = thresholds->primary->entries[i];
if (thresholds_new->entries[j].threshold < usage) { if (new->entries[j].threshold < usage) {
/* /*
* thresholds_new->current_threshold will not be used * new->current_threshold will not be used
* until rcu_assign_pointer(), so it's safe to increment * until rcu_assign_pointer(), so it's safe to increment
* it here. * it here.
*/ */
++thresholds_new->current_threshold; ++new->current_threshold;
} }
j++; j++;
} }
swap_buffers: swap_buffers:
/* Swap thresholds array and preallocated buffer */ /* Swap primary and spare array */
if (type == _MEM) { thresholds->spare = thresholds->primary;
memcg->__thresholds = thresholds; rcu_assign_pointer(thresholds->primary, new);
rcu_assign_pointer(memcg->thresholds, thresholds_new);
} else {
memcg->__memsw_thresholds = thresholds;
rcu_assign_pointer(memcg->memsw_thresholds, thresholds_new);
}
/* To be sure that nobody uses thresholds */ /* To be sure that nobody uses thresholds */
synchronize_rcu(); synchronize_rcu();
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment