Commit 830e4bc5 authored by Minchan Kim, committed by Linus Torvalds

zsmalloc: clean up many BUG_ON

There are many BUG_ONs in zsmalloc.c, which is not recommended, so replace
them with suitable alternatives.

The general rules are as follows:

1. avoid BUG_ON if possible. Instead, use VM_BUG_ON or VM_BUG_ON_PAGE

2. use VM_BUG_ON_PAGE if we need to see struct page's fields

3. put those assertions in primitive functions so that higher-level
   functions can rely on the assertion in the primitive function.

4. don't use an assertion if the instruction that follows would trigger an Oops anyway. An illustrative sketch of these rules is shown below.
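To make these rules concrete, here is a minimal sketch; it is not part of the patch itself. The example_* functions are hypothetical stand-ins for zsmalloc helpers, is_first_page() is zsmalloc's own static helper (assumed to be in scope), and VM_BUG_ON_PAGE(), VM_BUG_ON(), WARN_ON_ONCE() and in_interrupt() are existing kernel primitives:

#include <linux/mm.h>
#include <linux/mmdebug.h>      /* VM_BUG_ON, VM_BUG_ON_PAGE */
#include <linux/bug.h>          /* WARN_ON_ONCE */
#include <linux/preempt.h>      /* in_interrupt */

/*
 * Rules 1 and 2: a primitive helper asserts with VM_BUG_ON_PAGE so the
 * offending struct page is dumped when CONFIG_DEBUG_VM is enabled, and
 * the check compiles away on production kernels.
 */
static void example_get_mapping(struct page *first_page)
{
        VM_BUG_ON_PAGE(!is_first_page(first_page), first_page);
        /* ... decode first_page->mapping ... */
}

/*
 * Rule 3: a higher-level function relies on the primitive's assertion
 * instead of repeating BUG_ON(!is_first_page(first_page)) here.
 */
static void example_fix_fullness(struct page *first_page)
{
        example_get_mapping(first_page);        /* asserts for us */
        /* ... */
}

/*
 * Rule 4: if the next instruction would Oops on a bad pointer anyway,
 * an extra assertion adds nothing.  For recoverable misuse, such as
 * mapping from interrupt context, warn instead of killing the machine.
 */
static void *example_map(struct page *page)
{
        WARN_ON_ONCE(in_interrupt());
        return page->mapping;   /* a NULL page would Oops right here anyway */
}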
Signed-off-by: Minchan Kim <minchan@kernel.org>
Reviewed-by: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent a4209467
@@ -418,7 +418,7 @@ static void get_zspage_mapping(struct page *first_page,
                 enum fullness_group *fullness)
 {
         unsigned long m;
-        BUG_ON(!is_first_page(first_page));
+        VM_BUG_ON_PAGE(!is_first_page(first_page), first_page);
 
         m = (unsigned long)first_page->mapping;
         *fullness = m & FULLNESS_MASK;
@@ -430,7 +430,7 @@ static void set_zspage_mapping(struct page *first_page,
                 enum fullness_group fullness)
 {
         unsigned long m;
-        BUG_ON(!is_first_page(first_page));
+        VM_BUG_ON_PAGE(!is_first_page(first_page), first_page);
 
         m = ((class_idx & CLASS_IDX_MASK) << FULLNESS_BITS) |
                         (fullness & FULLNESS_MASK);
@@ -631,7 +631,8 @@ static enum fullness_group get_fullness_group(struct page *first_page)
 {
         int inuse, max_objects;
         enum fullness_group fg;
-        BUG_ON(!is_first_page(first_page));
+
+        VM_BUG_ON_PAGE(!is_first_page(first_page), first_page);
 
         inuse = first_page->inuse;
         max_objects = first_page->objects;
@@ -659,7 +660,7 @@ static void insert_zspage(struct page *first_page, struct size_class *class,
 {
         struct page **head;
 
-        BUG_ON(!is_first_page(first_page));
+        VM_BUG_ON_PAGE(!is_first_page(first_page), first_page);
 
         if (fullness >= _ZS_NR_FULLNESS_GROUPS)
                 return;
@@ -691,13 +692,13 @@ static void remove_zspage(struct page *first_page, struct size_class *class,
 {
         struct page **head;
 
-        BUG_ON(!is_first_page(first_page));
+        VM_BUG_ON_PAGE(!is_first_page(first_page), first_page);
 
         if (fullness >= _ZS_NR_FULLNESS_GROUPS)
                 return;
 
         head = &class->fullness_list[fullness];
-        BUG_ON(!*head);
+        VM_BUG_ON_PAGE(!*head, first_page);
         if (list_empty(&(*head)->lru))
                 *head = NULL;
         else if (*head == first_page)
@@ -724,8 +725,6 @@ static enum fullness_group fix_fullness_group(struct size_class *class,
         int class_idx;
         enum fullness_group currfg, newfg;
 
-        BUG_ON(!is_first_page(first_page));
-
         get_zspage_mapping(first_page, &class_idx, &currfg);
         newfg = get_fullness_group(first_page);
         if (newfg == currfg)
@@ -811,7 +810,7 @@ static void *location_to_obj(struct page *page, unsigned long obj_idx)
         unsigned long obj;
 
         if (!page) {
-                BUG_ON(obj_idx);
+                VM_BUG_ON(obj_idx);
                 return NULL;
         }
 
@@ -844,7 +843,7 @@ static unsigned long obj_to_head(struct size_class *class, struct page *page,
                 void *obj)
 {
         if (class->huge) {
-                VM_BUG_ON(!is_first_page(page));
+                VM_BUG_ON_PAGE(!is_first_page(page), page);
                 return page_private(page);
         } else
                 return *(unsigned long *)obj;
@@ -894,8 +893,8 @@ static void free_zspage(struct page *first_page)
 {
         struct page *nextp, *tmp, *head_extra;
 
-        BUG_ON(!is_first_page(first_page));
-        BUG_ON(first_page->inuse);
+        VM_BUG_ON_PAGE(!is_first_page(first_page), first_page);
+        VM_BUG_ON_PAGE(first_page->inuse, first_page);
 
         head_extra = (struct page *)page_private(first_page);
 
@@ -921,7 +920,8 @@ static void init_zspage(struct page *first_page, struct size_class *class)
         unsigned long off = 0;
         struct page *page = first_page;
 
-        BUG_ON(!is_first_page(first_page));
+        VM_BUG_ON_PAGE(!is_first_page(first_page), first_page);
+
         while (page) {
                 struct page *next_page;
                 struct link_free *link;
@@ -1238,7 +1238,7 @@ static bool can_merge(struct size_class *prev, int size, int pages_per_zspage)
 
 static bool zspage_full(struct page *first_page)
 {
-        BUG_ON(!is_first_page(first_page));
+        VM_BUG_ON_PAGE(!is_first_page(first_page), first_page);
 
         return first_page->inuse == first_page->objects;
 }
@@ -1276,14 +1276,12 @@ void *zs_map_object(struct zs_pool *pool, unsigned long handle,
         struct page *pages[2];
         void *ret;
 
-        BUG_ON(!handle);
-
         /*
          * Because we use per-cpu mapping areas shared among the
          * pools/users, we can't allow mapping in interrupt context
          * because it can corrupt another users mappings.
          */
-        BUG_ON(in_interrupt());
+        WARN_ON_ONCE(in_interrupt());
 
         /* From now on, migration cannot move the object */
         pin_tag(handle);
@@ -1327,8 +1325,6 @@ void zs_unmap_object(struct zs_pool *pool, unsigned long handle)
         struct size_class *class;
         struct mapping_area *area;
 
-        BUG_ON(!handle);
-
         obj = handle_to_obj(handle);
         obj_to_location(obj, &page, &obj_idx);
         get_zspage_mapping(get_first_page(page), &class_idx, &fg);
@@ -1448,8 +1444,6 @@ static void obj_free(struct zs_pool *pool, struct size_class *class,
         unsigned long f_objidx, f_offset;
         void *vaddr;
 
-        BUG_ON(!obj);
-
         obj &= ~OBJ_ALLOCATED_TAG;
         obj_to_location(obj, &f_page, &f_objidx);
         first_page = get_first_page(f_page);
@@ -1549,7 +1543,6 @@ static void zs_object_copy(unsigned long dst, unsigned long src,
                         kunmap_atomic(d_addr);
                         kunmap_atomic(s_addr);
                         s_page = get_next_page(s_page);
-                        BUG_ON(!s_page);
                         s_addr = kmap_atomic(s_page);
                         d_addr = kmap_atomic(d_page);
                         s_size = class->size - written;
@@ -1559,7 +1552,6 @@ static void zs_object_copy(unsigned long dst, unsigned long src,
                 if (d_off >= PAGE_SIZE) {
                         kunmap_atomic(d_addr);
                         d_page = get_next_page(d_page);
-                        BUG_ON(!d_page);
                         d_addr = kmap_atomic(d_page);
                         d_size = class->size - written;
                         d_off = 0;
@@ -1694,8 +1686,6 @@ static enum fullness_group putback_zspage(struct zs_pool *pool,
 {
         enum fullness_group fullness;
 
-        BUG_ON(!is_first_page(first_page));
-
         fullness = get_fullness_group(first_page);
         insert_zspage(first_page, class, fullness);
         set_zspage_mapping(first_page, class->index, fullness);
@@ -1759,8 +1749,6 @@ static void __zs_compact(struct zs_pool *pool, struct size_class *class)
 
         spin_lock(&class->lock);
         while ((src_page = isolate_source_page(class))) {
-
-                BUG_ON(!is_first_page(src_page));
 
                 if (!zs_can_compact(class))
                         break;