Commit 3828a764 authored by Minchan Kim, committed by Linus Torvalds

zsmalloc: rename zs_stat_type to class_stat_type

The stat is a per-class stat, not a per-zspage one, so rename it.

Link: https://lkml.kernel.org/r/20211115185909.3949505-3-minchan@kernel.org
Signed-off-by: Minchan Kim <minchan@kernel.org>
Acked-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Tested-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Cc: Mike Galbraith <umgwanakikbuti@gmail.com>
Cc: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Sergey Senozhatsky <senozhatsky@chromium.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 67f1c9cd
...@@ -158,7 +158,7 @@ enum fullness_group { ...@@ -158,7 +158,7 @@ enum fullness_group {
NR_ZS_FULLNESS, NR_ZS_FULLNESS,
}; };
enum zs_stat_type { enum class_stat_type {
CLASS_EMPTY, CLASS_EMPTY,
CLASS_ALMOST_EMPTY, CLASS_ALMOST_EMPTY,
CLASS_ALMOST_FULL, CLASS_ALMOST_FULL,
...@@ -549,21 +549,21 @@ static int get_size_class_index(int size) ...@@ -549,21 +549,21 @@ static int get_size_class_index(int size)
return min_t(int, ZS_SIZE_CLASSES - 1, idx); return min_t(int, ZS_SIZE_CLASSES - 1, idx);
} }
/* type can be of enum type zs_stat_type or fullness_group */ /* type can be of enum type class_stat_type or fullness_group */
static inline void zs_stat_inc(struct size_class *class, static inline void class_stat_inc(struct size_class *class,
int type, unsigned long cnt) int type, unsigned long cnt)
{ {
class->stats.objs[type] += cnt; class->stats.objs[type] += cnt;
} }
/* type can be of enum type zs_stat_type or fullness_group */ /* type can be of enum type class_stat_type or fullness_group */
static inline void zs_stat_dec(struct size_class *class, static inline void class_stat_dec(struct size_class *class,
int type, unsigned long cnt) int type, unsigned long cnt)
{ {
class->stats.objs[type] -= cnt; class->stats.objs[type] -= cnt;
} }
/* type can be of enum type zs_stat_type or fullness_group */ /* type can be of enum type class_stat_type or fullness_group */
static inline unsigned long zs_stat_get(struct size_class *class, static inline unsigned long zs_stat_get(struct size_class *class,
int type) int type)
{ {
...@@ -725,7 +725,7 @@ static void insert_zspage(struct size_class *class, ...@@ -725,7 +725,7 @@ static void insert_zspage(struct size_class *class,
{ {
struct zspage *head; struct zspage *head;
zs_stat_inc(class, fullness, 1); class_stat_inc(class, fullness, 1);
head = list_first_entry_or_null(&class->fullness_list[fullness], head = list_first_entry_or_null(&class->fullness_list[fullness],
struct zspage, list); struct zspage, list);
/* /*
...@@ -750,7 +750,7 @@ static void remove_zspage(struct size_class *class, ...@@ -750,7 +750,7 @@ static void remove_zspage(struct size_class *class,
VM_BUG_ON(is_zspage_isolated(zspage)); VM_BUG_ON(is_zspage_isolated(zspage));
list_del_init(&zspage->list); list_del_init(&zspage->list);
zs_stat_dec(class, fullness, 1); class_stat_dec(class, fullness, 1);
} }
/* /*
...@@ -964,7 +964,7 @@ static void __free_zspage(struct zs_pool *pool, struct size_class *class, ...@@ -964,7 +964,7 @@ static void __free_zspage(struct zs_pool *pool, struct size_class *class,
cache_free_zspage(pool, zspage); cache_free_zspage(pool, zspage);
zs_stat_dec(class, OBJ_ALLOCATED, class->objs_per_zspage); class_stat_dec(class, OBJ_ALLOCATED, class->objs_per_zspage);
atomic_long_sub(class->pages_per_zspage, atomic_long_sub(class->pages_per_zspage,
&pool->pages_allocated); &pool->pages_allocated);
} }
...@@ -1394,7 +1394,7 @@ static unsigned long obj_malloc(struct size_class *class, ...@@ -1394,7 +1394,7 @@ static unsigned long obj_malloc(struct size_class *class,
kunmap_atomic(vaddr); kunmap_atomic(vaddr);
mod_zspage_inuse(zspage, 1); mod_zspage_inuse(zspage, 1);
zs_stat_inc(class, OBJ_USED, 1); class_stat_inc(class, OBJ_USED, 1);
obj = location_to_obj(m_page, obj); obj = location_to_obj(m_page, obj);
...@@ -1458,7 +1458,7 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp) ...@@ -1458,7 +1458,7 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp)
record_obj(handle, obj); record_obj(handle, obj);
atomic_long_add(class->pages_per_zspage, atomic_long_add(class->pages_per_zspage,
&pool->pages_allocated); &pool->pages_allocated);
zs_stat_inc(class, OBJ_ALLOCATED, class->objs_per_zspage); class_stat_inc(class, OBJ_ALLOCATED, class->objs_per_zspage);
/* We completely set up zspage so mark them as movable */ /* We completely set up zspage so mark them as movable */
SetZsPageMovable(pool, zspage); SetZsPageMovable(pool, zspage);
...@@ -1489,7 +1489,7 @@ static void obj_free(struct size_class *class, unsigned long obj) ...@@ -1489,7 +1489,7 @@ static void obj_free(struct size_class *class, unsigned long obj)
kunmap_atomic(vaddr); kunmap_atomic(vaddr);
set_freeobj(zspage, f_objidx); set_freeobj(zspage, f_objidx);
mod_zspage_inuse(zspage, -1); mod_zspage_inuse(zspage, -1);
zs_stat_dec(class, OBJ_USED, 1); class_stat_dec(class, OBJ_USED, 1);
} }
void zs_free(struct zs_pool *pool, unsigned long handle) void zs_free(struct zs_pool *pool, unsigned long handle)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment