Commit a6a8cdfd authored by Chengming Zhou, committed by Andrew Morton

mm/zsmalloc: remove set_zspage_mapping()

Patch series "mm/zsmalloc: some cleanup for get/set_zspage_mapping()".

The discussion [1] with Sergey shows there is some cleanup work to do
in get/set_zspage_mapping() (the current helpers are sketched below,
after the link):

- the fullness returned from get_zspage_mapping() is not stable outside
  pool->lock, so this usage pattern is confusing, though it should be OK
  in the free_zspage path.

- we seldom use the class_idx returned from get_zspage_mapping(); only
  the free_zspage path uses it to look up its class.

- set_zspage_mapping() always sets zspage->class, which never changes
  after the zspage is allocated.

[1] https://lore.kernel.org/all/a6c22e30-cf10-4122-91bc-ceb9fb57a5d6@bytedance.com/
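
For context, the helpers under discussion look roughly like this
(set_zspage_mapping() matches the code removed in the diff below; the
get_zspage_mapping() body is a sketch of the expected accessor, not a
verbatim copy of the file):

    /* sketch: reads back the pair that set_zspage_mapping() stored */
    static void get_zspage_mapping(struct zspage *zspage,
                                   unsigned int *class_idx,
                                   int *fullness)
    {
            *class_idx = zspage->class;
            *fullness = zspage->fullness;
    }

    static void set_zspage_mapping(struct zspage *zspage,
                                   unsigned int class_idx,
                                   int fullness)
    {
            zspage->class = class_idx;
            zspage->fullness = fullness;
    }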


This patch (of 3):

We only need to update zspage->fullness in insert_zspage(), since
zspage->class never changes after the zspage is allocated.
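
Condensed sketch of the result (assembled from the hunks below, not
verbatim file contents): insert_zspage() becomes the single writer of
zspage->fullness, and alloc_zspage() records the class once.

    static void insert_zspage(struct size_class *class,
                              struct zspage *zspage, int fullness)
    {
            class_stat_inc(class, fullness, 1);
            list_add(&zspage->list, &class->fullness_list[fullness]);
            /* now the only place zspage->fullness is written */
            zspage->fullness = fullness;
    }

with alloc_zspage() gaining "zspage->class = class->index;" right after
it sets zspage->pool.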

Link: https://lkml.kernel.org/r/20240220-b4-zsmalloc-cleanup-v1-0-5c5ee4ccdd87@bytedance.com
Link: https://lkml.kernel.org/r/20240220-b4-zsmalloc-cleanup-v1-1-5c5ee4ccdd87@bytedance.com
Signed-off-by: Chengming Zhou <zhouchengming@bytedance.com>
Reviewed-by: Sergey Senozhatsky <senozhatsky@chromium.org>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Nhat Pham <nphamcs@gmail.com>
Cc: Yosry Ahmed <yosryahmed@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent f6f3f275
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -486,14 +486,6 @@ static struct size_class *zspage_class(struct zs_pool *pool,
 	return pool->size_class[zspage->class];
 }
 
-static void set_zspage_mapping(struct zspage *zspage,
-			       unsigned int class_idx,
-			       int fullness)
-{
-	zspage->class = class_idx;
-	zspage->fullness = fullness;
-}
-
 /*
  * zsmalloc divides the pool into various size classes where each
  * class maintains a list of zspages where each zspage is divided
@@ -688,6 +680,7 @@ static void insert_zspage(struct size_class *class,
 {
 	class_stat_inc(class, fullness, 1);
 	list_add(&zspage->list, &class->fullness_list[fullness]);
+	zspage->fullness = fullness;
 }
 
 /*
@@ -725,7 +718,6 @@ static int fix_fullness_group(struct size_class *class, struct zspage *zspage)
 
 	remove_zspage(class, zspage, currfg);
 	insert_zspage(class, zspage, newfg);
-	set_zspage_mapping(zspage, class_idx, newfg);
 out:
 	return newfg;
 }
@@ -1005,6 +997,7 @@ static struct zspage *alloc_zspage(struct zs_pool *pool,
 	create_page_chain(class, zspage, pages);
 	init_zspage(class, zspage);
 	zspage->pool = pool;
+	zspage->class = class->index;
 
 	return zspage;
 }
@@ -1397,7 +1390,6 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp)
 	obj = obj_malloc(pool, zspage, handle);
 	newfg = get_fullness_group(class, zspage);
 	insert_zspage(class, zspage, newfg);
-	set_zspage_mapping(zspage, class->index, newfg);
 	record_obj(handle, obj);
 	atomic_long_add(class->pages_per_zspage, &pool->pages_allocated);
 	class_stat_inc(class, ZS_OBJS_ALLOCATED, class->objs_per_zspage);
@@ -1655,7 +1647,6 @@ static int putback_zspage(struct size_class *class, struct zspage *zspage)
 
 	fullness = get_fullness_group(class, zspage);
 	insert_zspage(class, zspage, fullness);
-	set_zspage_mapping(zspage, class->index, fullness);
 
 	return fullness;
 }