Commit 6098d7e1 authored by Vitaly Wool, committed by Linus Torvalds

z3fold: fix reclaim lock-ups

Do not try to optimize in-page object layout while the page is under
reclaim.  This fixes lock-ups on reclaim and improves reclaim
performance at the same time.

[akpm@linux-foundation.org: coding-style fixes]
Link: http://lkml.kernel.org/r/20180430125800.444cae9706489f412ad12621@gmail.com
Signed-off-by: Vitaly Wool <vitaly.vul@sony.com>
Reported-by: Guenter Roeck <linux@roeck-us.net>
Tested-by: Guenter Roeck <linux@roeck-us.net>
Cc: <Oleksiy.Avramchenko@sony.com>
Cc: Matthew Wilcox <mawilcox@microsoft.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent ae646f0b
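
For readers who want the idea behind the patch without parsing the diff below, here is a minimal userspace sketch of the same guard pattern: reclaim marks a page as being under reclaim before it starts evicting objects, and the free path backs off instead of queueing in-page compaction for such a page. This is an illustration only; struct fake_page, try_free() and reclaim_one() are made-up stand-ins, not the z3fold API.

/*
 * Minimal userspace sketch (not the kernel code) of the guard pattern this
 * patch introduces: reclaim flags a page while it works on it, and the free
 * path skips scheduling in-page compaction for such pages, so the two paths
 * can no longer lock each other up.  All names here are illustrative.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_page {
	pthread_mutex_t lock;		/* stands in for zhdr->page_lock */
	bool under_reclaim;		/* stands in for the UNDER_RECLAIM bit */
	bool needs_compacting;		/* stands in for NEEDS_COMPACTING */
	int used_chunks;		/* objects still allocated in the page */
};

/* Free path: never queue compaction while reclaim owns the page. */
static void try_free(struct fake_page *p)
{
	pthread_mutex_lock(&p->lock);
	p->used_chunks--;
	if (p->under_reclaim) {
		/* reclaim will sort out the layout itself; just back off */
		pthread_mutex_unlock(&p->lock);
		return;
	}
	if (!p->needs_compacting) {
		p->needs_compacting = true;
		printf("queued compaction work\n");
	}
	pthread_mutex_unlock(&p->lock);
}

/* Reclaim path: flag the page before dropping its lock to evict objects. */
static void reclaim_one(struct fake_page *p)
{
	pthread_mutex_lock(&p->lock);
	p->under_reclaim = true;
	pthread_mutex_unlock(&p->lock);

	/* ... evict objects; concurrent frees now skip compaction ... */

	pthread_mutex_lock(&p->lock);
	p->under_reclaim = false;
	if (p->used_chunks == 0)
		printf("page fully freed by reclaim\n");
	pthread_mutex_unlock(&p->lock);
}

int main(void)
{
	struct fake_page p = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.used_chunks = 2,
	};

	reclaim_one(&p);
	try_free(&p);
	return 0;
}

In the actual patch the flag is the new UNDER_RECLAIM page bit: z3fold_free() tests it and bails out early, while z3fold_reclaim_page() sets it before eviction and clears it when the page is handed back.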
@@ -144,7 +144,8 @@ enum z3fold_page_flags {
 	PAGE_HEADLESS = 0,
 	MIDDLE_CHUNK_MAPPED,
 	NEEDS_COMPACTING,
-	PAGE_STALE
+	PAGE_STALE,
+	UNDER_RECLAIM
 };
 
 /*****************
@@ -173,6 +174,7 @@ static struct z3fold_header *init_z3fold_page(struct page *page,
 	clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
 	clear_bit(NEEDS_COMPACTING, &page->private);
 	clear_bit(PAGE_STALE, &page->private);
+	clear_bit(UNDER_RECLAIM, &page->private);
 
 	spin_lock_init(&zhdr->page_lock);
 	kref_init(&zhdr->refcount);
@@ -756,6 +758,10 @@ static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
 		atomic64_dec(&pool->pages_nr);
 		return;
 	}
+	if (test_bit(UNDER_RECLAIM, &page->private)) {
+		z3fold_page_unlock(zhdr);
+		return;
+	}
 	if (test_and_set_bit(NEEDS_COMPACTING, &page->private)) {
 		z3fold_page_unlock(zhdr);
 		return;
@@ -840,6 +846,8 @@ static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
 			kref_get(&zhdr->refcount);
 			list_del_init(&zhdr->buddy);
 			zhdr->cpu = -1;
+			set_bit(UNDER_RECLAIM, &page->private);
+			break;
 		}
 
 		list_del_init(&page->lru);
@@ -887,25 +895,35 @@ static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
 				goto next;
 		}
 next:
-		spin_lock(&pool->lock);
 		if (test_bit(PAGE_HEADLESS, &page->private)) {
 			if (ret == 0) {
-				spin_unlock(&pool->lock);
 				free_z3fold_page(page);
 				return 0;
 			}
-		} else if (kref_put(&zhdr->refcount, release_z3fold_page)) {
-			atomic64_dec(&pool->pages_nr);
+			spin_lock(&pool->lock);
+			list_add(&page->lru, &pool->lru);
 			spin_unlock(&pool->lock);
-			return 0;
+		} else {
+			z3fold_page_lock(zhdr);
+			clear_bit(UNDER_RECLAIM, &page->private);
+			if (kref_put(&zhdr->refcount,
+					release_z3fold_page_locked)) {
+				atomic64_dec(&pool->pages_nr);
+				return 0;
+			}
+			/*
+			 * if we are here, the page is still not completely
+			 * free. Take the global pool lock then to be able
+			 * to add it back to the lru list
+			 */
+			spin_lock(&pool->lock);
+			list_add(&page->lru, &pool->lru);
+			spin_unlock(&pool->lock);
+			z3fold_page_unlock(zhdr);
 		}
 
-		/*
-		 * Add to the beginning of LRU.
-		 * Pool lock has to be kept here to ensure the page has
-		 * not already been released
-		 */
-		list_add(&page->lru, &pool->lru);
+		/* We started off locked to we need to lock the pool back */
+		spin_lock(&pool->lock);
 	}
 	spin_unlock(&pool->lock);
 	return -EAGAIN;