Commit b8627835 authored by Li Zefan, committed by Tejun Heo

memcg: stop using css id

Now memcg uses cgroup id instead of css id. Update some comments and
set mem_cgroup_subsys->use_id to 0.
Signed-off-by: Li Zefan <lizefan@huawei.com>
Acked-by: Michal Hocko <mhocko@suse.cz>
Signed-off-by: Tejun Heo <tj@kernel.org>
parent 4219b2da
@@ -582,17 +582,12 @@ static void disarm_sock_keys(struct mem_cgroup *memcg)
 #ifdef CONFIG_MEMCG_KMEM
 /*
  * This will be the memcg's index in each cache's ->memcg_params->memcg_caches.
- * There are two main reasons for not using the css_id for this:
- *  1) this works better in sparse environments, where we have a lot of memcgs,
- *     but only a few kmem-limited. Or also, if we have, for instance, 200
- *     memcgs, and none but the 200th is kmem-limited, we'd have to have a
- *     200 entry array for that.
+ * The main reason for not using cgroup id for this:
+ *  this works better in sparse environments, where we have a lot of memcgs,
+ *  but only a few kmem-limited. Or also, if we have, for instance, 200
+ *  memcgs, and none but the 200th is kmem-limited, we'd have to have a
+ *  200 entry array for that.
  *
- * 2) In order not to violate the cgroup API, we would like to do all memory
- *    allocation in ->create(). At that point, we haven't yet allocated the
- *    css_id. Having a separate index prevents us from messing with the cgroup
- *    core for this
- *
  * The current size of the caches array is stored in
  * memcg_limited_groups_array_size. It will double each time we have to
  * increase it.
@@ -606,14 +601,14 @@ int memcg_limited_groups_array_size;
  * cgroups is a reasonable guess. In the future, it could be a parameter or
  * tunable, but that is strictly not necessary.
  *
- * MAX_SIZE should be as large as the number of css_ids. Ideally, we could get
+ * MAX_SIZE should be as large as the number of cgrp_ids. Ideally, we could get
  * this constant directly from cgroup, but it is understandable that this is
  * better kept as an internal representation in cgroup.c. In any case, the
- * css_id space is not getting any smaller, and we don't have to necessarily
+ * cgrp_id space is not getting any smaller, and we don't have to necessarily
  * increase ours as well if it increases.
  */
 #define MEMCG_CACHES_MIN_SIZE 4
-#define MEMCG_CACHES_MAX_SIZE 65535
+#define MEMCG_CACHES_MAX_SIZE MEM_CGROUP_ID_MAX
 
 /*
  * A lot of the calls to the cache allocation functions are expected to be
@@ -5984,8 +5979,6 @@ static void __mem_cgroup_free(struct mem_cgroup *memcg)
 	int node;
 	size_t size = memcg_size();
 
-	free_css_id(&mem_cgroup_subsys, &memcg->css);
-
 	for_each_node(node)
 		free_mem_cgroup_per_zone_info(memcg, node);
 
@@ -6766,7 +6759,6 @@ struct cgroup_subsys mem_cgroup_subsys = {
 	.bind = mem_cgroup_bind,
 	.base_cftypes = mem_cgroup_files,
 	.early_init = 0,
-	.use_id = 1,
 };
 
 #ifdef CONFIG_MEMCG_SWAP
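The comment kept by the first hunk describes a growth policy rather than an API: each cache's memcg_caches array is indexed by a dense "kmem-limited" index, starts at MEMCG_CACHES_MIN_SIZE entries, doubles whenever a newly kmem-limited memcg needs a slot past the current size, and is capped at MEMCG_CACHES_MAX_SIZE. Below is a minimal userspace sketch of that rule; it is illustrative only, not the kernel helper, the helper name is hypothetical, and it assumes MEM_CGROUP_ID_MAX keeps the old 65535 cap.

/* Illustrative sketch only -- not kernel code.  Models the sizing rule from
 * the comment above: start small, double on demand, cap at the id space. */
#include <stdio.h>

#define MEMCG_CACHES_MIN_SIZE 4
#define MEM_CGROUP_ID_MAX     65535   /* assumption: same value as the old literal cap */
#define MEMCG_CACHES_MAX_SIZE MEM_CGROUP_ID_MAX

/* Hypothetical helper: array size needed once num_groups kmem-limited memcgs
 * exist, following the "double each time we have to increase it" policy. */
static int caches_array_size(int num_groups)
{
	int size = 2 * num_groups;

	if (size < MEMCG_CACHES_MIN_SIZE)
		size = MEMCG_CACHES_MIN_SIZE;
	else if (size > MEMCG_CACHES_MAX_SIZE)
		size = MEMCG_CACHES_MAX_SIZE;
	return size;
}

int main(void)
{
	/* The comment's example: 200 memcgs but only one of them kmem-limited.
	 * A dense index needs only a handful of slots, whereas indexing by a
	 * global id would need an array covering all ~200 memcgs. */
	printf("1 kmem-limited memcg    -> %d slots\n", caches_array_size(1));   /* 4 */
	printf("3 kmem-limited memcgs   -> %d slots\n", caches_array_size(3));   /* 6 */
	printf("200 kmem-limited memcgs -> %d slots\n", caches_array_size(200)); /* 400 */
	return 0;
}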