Commit 67f1c9cd authored by Minchan Kim, committed by Linus Torvalds

zsmalloc: introduce some helper functions

Patch series "zsmalloc: remove bit_spin_lock", v2.

zsmalloc uses bit_spin_lock to minimize space overhead since it is a
per-zspage granularity lock. However, it makes zsmalloc unusable under
PREEMPT_RT and adds too much complication.

This patchset replaces the bit_spin_lock with a per-pool rwlock. It also
removes the unnecessary zspage isolation logic from size_class, which was
the other major source of complication in zsmalloc.

The last patch changes get_cpu_var to a local_lock so that it works under
PREEMPT_RT.
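
As a rough sketch only (that conversion happens in the last patch of the
series, not in this one), the usual local_lock pattern for a per-CPU area
such as zs_map_area looks roughly like the following; the struct layout and
the field name "lock" are illustrative assumptions, not the actual patch:

        #include <linux/local_lock.h>
        #include <linux/percpu.h>

        /* illustrative only: the real struct mapping_area has more members */
        struct mapping_area {
                local_lock_t lock;      /* assumed field name */
                char *vm_buf;
        };

        static DEFINE_PER_CPU(struct mapping_area, zs_map_area) = {
                .lock = INIT_LOCAL_LOCK(lock),
        };

        static void map_area_example(void)
        {
                struct mapping_area *area;

                /* replaces the get_cpu_var()/put_cpu_var() pair */
                local_lock(&zs_map_area.lock);
                area = this_cpu_ptr(&zs_map_area);
                /* ... use area ... */
                local_unlock(&zs_map_area.lock);
        }

Unlike get_cpu_var, local_lock becomes a per-CPU spinlock under PREEMPT_RT
rather than disabling preemption, which is what makes the path RT-friendly.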

This patch (of 9):

get_zspage_mapping returns fullness as well as class_idx. However, the
fullness is usually not needed since it can be stale in some contexts. That
is misleading and generates unnecessary instructions, so this patch
introduces zspage_class.
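
In caller terms (as the diff below shows), the change collapses the two-step
lookup into a single helper call:

        /* before: fullness is fetched but never used */
        get_zspage_mapping(zspage, &class_idx, &fg);
        class = pool->size_class[class_idx];

        /* after */
        class = zspage_class(pool, zspage);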

obj_to_location also produces both the page and the index, but the index is
not always needed either, so this patch introduces obj_to_page.
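
Similarly, callers such as zs_free that only need the page can drop the
unused index (again visible in the diff below):

        /* before: f_objidx is computed but never used */
        obj_to_location(obj, &f_page, &f_objidx);

        /* after */
        obj_to_page(obj, &f_page);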

Link: https://lkml.kernel.org/r/20211115185909.3949505-1-minchan@kernel.org
Link: https://lkml.kernel.org/r/20211115185909.3949505-2-minchan@kernel.org
Signed-off-by: Minchan Kim <minchan@kernel.org>
Acked-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Tested-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Mike Galbraith <umgwanakikbuti@gmail.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 1622ed7d
@@ -517,6 +517,12 @@ static void get_zspage_mapping(struct zspage *zspage,
         *class_idx = zspage->class;
 }
 
+static struct size_class *zspage_class(struct zs_pool *pool,
+                                       struct zspage *zspage)
+{
+        return pool->size_class[zspage->class];
+}
+
 static void set_zspage_mapping(struct zspage *zspage,
                                 unsigned int class_idx,
                                 enum fullness_group fullness)
@@ -844,6 +850,12 @@ static void obj_to_location(unsigned long obj, struct page **page,
         *obj_idx = (obj & OBJ_INDEX_MASK);
 }
 
+static void obj_to_page(unsigned long obj, struct page **page)
+{
+        obj >>= OBJ_TAG_BITS;
+        *page = pfn_to_page(obj >> OBJ_INDEX_BITS);
+}
+
 /**
  * location_to_obj - get obj value encoded from (<page>, <obj_idx>)
  * @page: page object resides in zspage
@@ -1246,8 +1258,6 @@ void *zs_map_object(struct zs_pool *pool, unsigned long handle,
         unsigned long obj, off;
         unsigned int obj_idx;
-        unsigned int class_idx;
-        enum fullness_group fg;
         struct size_class *class;
         struct mapping_area *area;
         struct page *pages[2];
@@ -1270,8 +1280,7 @@ void *zs_map_object(struct zs_pool *pool, unsigned long handle,
         /* migration cannot move any subpage in this zspage */
         migrate_read_lock(zspage);
 
-        get_zspage_mapping(zspage, &class_idx, &fg);
-        class = pool->size_class[class_idx];
+        class = zspage_class(pool, zspage);
         off = (class->size * obj_idx) & ~PAGE_MASK;
 
         area = &get_cpu_var(zs_map_area);
@@ -1304,16 +1313,13 @@ void zs_unmap_object(struct zs_pool *pool, unsigned long handle)
         unsigned long obj, off;
         unsigned int obj_idx;
-        unsigned int class_idx;
-        enum fullness_group fg;
         struct size_class *class;
         struct mapping_area *area;
 
         obj = handle_to_obj(handle);
         obj_to_location(obj, &page, &obj_idx);
         zspage = get_zspage(page);
-        get_zspage_mapping(zspage, &class_idx, &fg);
-        class = pool->size_class[class_idx];
+        class = zspage_class(pool, zspage);
         off = (class->size * obj_idx) & ~PAGE_MASK;
 
         area = this_cpu_ptr(&zs_map_area);
@@ -1491,8 +1497,6 @@ void zs_free(struct zs_pool *pool, unsigned long handle)
         struct zspage *zspage;
         struct page *f_page;
         unsigned long obj;
-        unsigned int f_objidx;
-        int class_idx;
         struct size_class *class;
         enum fullness_group fullness;
         bool isolated;
@@ -1502,13 +1506,11 @@ void zs_free(struct zs_pool *pool, unsigned long handle)
         pin_tag(handle);
         obj = handle_to_obj(handle);
-        obj_to_location(obj, &f_page, &f_objidx);
+        obj_to_page(obj, &f_page);
         zspage = get_zspage(f_page);
 
         migrate_read_lock(zspage);
+        class = zspage_class(pool, zspage);
 
-        get_zspage_mapping(zspage, &class_idx, &fullness);
-        class = pool->size_class[class_idx];
         spin_lock(&class->lock);
 
         obj_free(class, obj);
@@ -1866,8 +1868,6 @@ static bool zs_page_isolate(struct page *page, isolate_mode_t mode)
 {
         struct zs_pool *pool;
         struct size_class *class;
-        int class_idx;
-        enum fullness_group fullness;
         struct zspage *zspage;
         struct address_space *mapping;
@@ -1880,15 +1880,10 @@ static bool zs_page_isolate(struct page *page, isolate_mode_t mode)
         zspage = get_zspage(page);
 
-        /*
-         * Without class lock, fullness could be stale while class_idx is okay
-         * because class_idx is constant unless page is freed so we should get
-         * fullness again under class lock.
-         */
-        get_zspage_mapping(zspage, &class_idx, &fullness);
         mapping = page_mapping(page);
         pool = mapping->private_data;
-        class = pool->size_class[class_idx];
+        class = zspage_class(pool, zspage);
 
         spin_lock(&class->lock);
         if (get_zspage_inuse(zspage) == 0) {
@@ -1907,6 +1902,9 @@ static bool zs_page_isolate(struct page *page, isolate_mode_t mode)
          * size_class to prevent further object allocation from the zspage.
          */
         if (!list_empty(&zspage->list) && !is_zspage_isolated(zspage)) {
+                enum fullness_group fullness;
+                unsigned int class_idx;
+
                 get_zspage_mapping(zspage, &class_idx, &fullness);
                 atomic_long_inc(&pool->isolated_pages);
                 remove_zspage(class, zspage, fullness);
@@ -1923,8 +1921,6 @@ static int zs_page_migrate(struct address_space *mapping, struct page *newpage,
 {
         struct zs_pool *pool;
         struct size_class *class;
-        int class_idx;
-        enum fullness_group fullness;
         struct zspage *zspage;
         struct page *dummy;
         void *s_addr, *d_addr, *addr;
@@ -1949,9 +1945,8 @@ static int zs_page_migrate(struct address_space *mapping, struct page *newpage,
         /* Concurrent compactor cannot migrate any subpage in zspage */
         migrate_write_lock(zspage);
 
-        get_zspage_mapping(zspage, &class_idx, &fullness);
         pool = mapping->private_data;
-        class = pool->size_class[class_idx];
+        class = zspage_class(pool, zspage);
         offset = get_first_obj_offset(page);
 
         spin_lock(&class->lock);
@@ -2049,8 +2044,6 @@ static void zs_page_putback(struct page *page)
 {
         struct zs_pool *pool;
         struct size_class *class;
-        int class_idx;
-        enum fullness_group fg;
         struct address_space *mapping;
         struct zspage *zspage;
@@ -2058,10 +2051,9 @@ static void zs_page_putback(struct page *page)
         VM_BUG_ON_PAGE(!PageIsolated(page), page);
 
         zspage = get_zspage(page);
-        get_zspage_mapping(zspage, &class_idx, &fg);
         mapping = page_mapping(page);
         pool = mapping->private_data;
-        class = pool->size_class[class_idx];
+        class = zspage_class(pool, zspage);
 
         spin_lock(&class->lock);
         dec_zspage_isolation(zspage);
...