Commit 3f9d2b57 authored by Vitaly Wool, committed by Linus Torvalds

z3fold: fix retry mechanism in page reclaim

z3fold_reclaim_page()'s retry mechanism is broken: on a second iteration
it still carries zhdr from the first one, so zhdr no longer matches the
struct page being processed.  That leads to crashes when the system is
stressed.

Fix that by moving the zhdr assignment up, before the PAGE_HEADLESS check,
so it is always derived from the current page.
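
For illustration only (not part of the patch), here is a minimal user-space
sketch of the stale-pointer pattern being fixed. Every name in it (struct
entry, walk_buggy(), walk_fixed()) is invented; the kernel counterparts are
struct page, struct z3fold_header and z3fold_reclaim_page(). It shows how a
pointer derived inside a retry loop goes stale when it is assigned only after
an early break, and how assigning it at the top of each iteration, as the
patch does for zhdr, keeps the pointer and the current entry in sync.

#include <stdio.h>

struct entry {
        int headless;   /* stands in for PAGE_HEADLESS */
        int id;         /* stands in for the per-entry header data */
};

/* Buggy shape: 'hdr' is derived only after the early break, so when the
 * second iteration breaks out, 'hdr' still describes entries[0]. */
static void walk_buggy(struct entry *entries, int n)
{
        struct entry *e = NULL;
        int *hdr = NULL;
        int i;

        for (i = 0; i < n; i++) {
                e = &entries[i];
                if (e->headless)
                        break;          /* hdr not updated for this entry */
                hdr = &e->id;
        }
        if (e && hdr)
                printf("buggy: current entry id=%d, but hdr still points at id=%d\n",
                       e->id, *hdr);
}

/* Fixed shape, mirroring the patch: derive 'hdr' from the current entry
 * before any early exit, so 'hdr' and 'e' always refer to the same entry. */
static void walk_fixed(struct entry *entries, int n)
{
        struct entry *e = NULL;
        int *hdr = NULL;
        int i;

        for (i = 0; i < n; i++) {
                e = &entries[i];
                hdr = &e->id;
                if (e->headless)
                        break;
        }
        if (e && hdr)
                printf("fixed: current entry id=%d, hdr points at id=%d\n",
                       e->id, *hdr);
}

int main(void)
{
        struct entry entries[2] = {
                { 0, 1 },       /* normal entry */
                { 1, 2 },       /* "headless" entry triggers the early break */
        };

        walk_buggy(entries, 2);
        walk_fixed(entries, 2);
        return 0;
}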

While at it, protect against using already-freed handles by making
z3fold_reclaim_page() encode handles into its own local slots structure.
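
Again purely for illustration (not part of the patch), a hedged user-space
sketch of the local-slots idea; the names slots_s, header_s and encode_into()
are invented stand-ins for struct z3fold_buddy_slots, struct z3fold_header
and __encode_handle(). The point is that the handle word lives in storage
owned by the reclaim path itself, so a racing free that wipes the shared
per-page slots cannot invalidate it.

#include <stdio.h>
#include <string.h>

#define NUM_SLOTS 3

/* Invented stand-in for struct z3fold_buddy_slots. */
struct slots_s {
        unsigned long slot[NUM_SLOTS];
};

/* Invented stand-in for struct z3fold_header. */
struct header_s {
        struct slots_s *slots;          /* shared; a racing free may wipe it */
};

/* Same shape as __encode_handle() after the patch: the caller decides which
 * slots structure (shared or on-stack) the handle is written into. */
static unsigned long encode_into(struct header_s *hdr, struct slots_s *slots,
                                 int idx)
{
        slots->slot[idx] = (unsigned long)hdr + (unsigned long)idx;
        return (unsigned long)&slots->slot[idx];
}

int main(void)
{
        struct slots_s shared = { { 0 } };
        struct header_s hdr = { &shared };
        struct slots_s local;           /* the reclaim path's private copy */
        unsigned long handle;

        /* Encode into the on-stack copy rather than hdr.slots... */
        handle = encode_into(&hdr, &local, 0);

        /* ...so wiping the shared structure, as a racing free might,
         * leaves the handle we are still holding intact. */
        memset(&shared, 0, sizeof(shared));

        printf("handle %#lx still dereferences to %#lx\n",
               handle, *(unsigned long *)handle);
        return 0;
}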

Link: http://lkml.kernel.org/r/20190908162919.830388dc7404d1e2c80f4095@gmail.com
Signed-off-by: Vitaly Wool <vitalywool@gmail.com>
Reported-by: Markus Linnala <markus.linnala@gmail.com>
Reported-by: Chris Murphy <bugzilla@colorremedies.com>
Reported-by: Agustin Dall'Alba <agustin@dallalba.com.ar>
Cc: "Maciej S. Szmigiero" <mail@maciej.szmigiero.name>
Cc: Shakeel Butt <shakeelb@google.com>
Cc: Henry Burns <henrywolfeburns@gmail.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 710ec38b
@@ -366,9 +366,10 @@ static inline int __idx(struct z3fold_header *zhdr, enum buddy bud)
  * Encodes the handle of a particular buddy within a z3fold page
  * Pool lock should be held as this function accesses first_num
  */
-static unsigned long encode_handle(struct z3fold_header *zhdr, enum buddy bud)
+static unsigned long __encode_handle(struct z3fold_header *zhdr,
+                                struct z3fold_buddy_slots *slots,
+                                enum buddy bud)
 {
-        struct z3fold_buddy_slots *slots;
         unsigned long h = (unsigned long)zhdr;
         int idx = 0;
 
@@ -385,11 +386,15 @@ static unsigned long encode_handle(struct z3fold_header *zhdr, enum buddy bud)
         if (bud == LAST)
                 h |= (zhdr->last_chunks << BUDDY_SHIFT);
 
-        slots = zhdr->slots;
         slots->slot[idx] = h;
         return (unsigned long)&slots->slot[idx];
 }
 
+static unsigned long encode_handle(struct z3fold_header *zhdr, enum buddy bud)
+{
+        return __encode_handle(zhdr, zhdr->slots, bud);
+}
+
 /* Returns the z3fold page where a given handle is stored */
 static inline struct z3fold_header *handle_to_z3fold_header(unsigned long h)
 {
@@ -624,6 +629,7 @@ static void do_compact_page(struct z3fold_header *zhdr, bool locked)
         }
 
         if (unlikely(PageIsolated(page) ||
+                     test_bit(PAGE_CLAIMED, &page->private) ||
                      test_bit(PAGE_STALE, &page->private))) {
                 z3fold_page_unlock(zhdr);
                 return;
@@ -1100,6 +1106,7 @@ static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
         struct z3fold_header *zhdr = NULL;
         struct page *page = NULL;
         struct list_head *pos;
+        struct z3fold_buddy_slots slots;
         unsigned long first_handle = 0, middle_handle = 0, last_handle = 0;
 
         spin_lock(&pool->lock);
@@ -1118,16 +1125,22 @@ static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
                         /* this bit could have been set by free, in which case
                          * we pass over to the next page in the pool.
                          */
-                        if (test_and_set_bit(PAGE_CLAIMED, &page->private))
+                        if (test_and_set_bit(PAGE_CLAIMED, &page->private)) {
+                                page = NULL;
                                 continue;
+                        }
 
-                        if (unlikely(PageIsolated(page)))
+                        if (unlikely(PageIsolated(page))) {
+                                clear_bit(PAGE_CLAIMED, &page->private);
+                                page = NULL;
                                 continue;
+                        }
+                        zhdr = page_address(page);
                         if (test_bit(PAGE_HEADLESS, &page->private))
                                 break;
 
-                        zhdr = page_address(page);
                         if (!z3fold_page_trylock(zhdr)) {
+                                clear_bit(PAGE_CLAIMED, &page->private);
                                 zhdr = NULL;
                                 continue; /* can't evict at this point */
                         }
@@ -1145,26 +1158,30 @@ static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
                 if (!test_bit(PAGE_HEADLESS, &page->private)) {
                         /*
-                         * We need encode the handles before unlocking, since
-                         * we can race with free that will set
-                         * (first|last)_chunks to 0
+                         * We need encode the handles before unlocking, and
+                         * use our local slots structure because z3fold_free
+                         * can zero out zhdr->slots and we can't do much
+                         * about that
                          */
                         first_handle = 0;
                         last_handle = 0;
                         middle_handle = 0;
                         if (zhdr->first_chunks)
-                                first_handle = encode_handle(zhdr, FIRST);
+                                first_handle = __encode_handle(zhdr, &slots,
+                                                                FIRST);
                         if (zhdr->middle_chunks)
-                                middle_handle = encode_handle(zhdr, MIDDLE);
+                                middle_handle = __encode_handle(zhdr, &slots,
+                                                                MIDDLE);
                         if (zhdr->last_chunks)
-                                last_handle = encode_handle(zhdr, LAST);
+                                last_handle = __encode_handle(zhdr, &slots,
+                                                                LAST);
                         /*
                          * it's safe to unlock here because we hold a
                          * reference to this page
                          */
                         z3fold_page_unlock(zhdr);
                 } else {
-                        first_handle = encode_handle(zhdr, HEADLESS);
+                        first_handle = __encode_handle(zhdr, &slots, HEADLESS);
                         last_handle = middle_handle = 0;
                 }
@@ -1194,9 +1211,9 @@ static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
                         spin_lock(&pool->lock);
                         list_add(&page->lru, &pool->lru);
                         spin_unlock(&pool->lock);
+                        clear_bit(PAGE_CLAIMED, &page->private);
                 } else {
                         z3fold_page_lock(zhdr);
-                        clear_bit(PAGE_CLAIMED, &page->private);
                         if (kref_put(&zhdr->refcount,
                                         release_z3fold_page_locked)) {
                                 atomic64_dec(&pool->pages_nr);
@@ -1211,6 +1228,7 @@ static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
                         list_add(&page->lru, &pool->lru);
                         spin_unlock(&pool->lock);
                         z3fold_page_unlock(zhdr);
+                        clear_bit(PAGE_CLAIMED, &page->private);
                 }
 
                 /* We started off locked to we need to lock the pool back */
@@ -1315,7 +1333,8 @@ static bool z3fold_page_isolate(struct page *page, isolate_mode_t mode)
         VM_BUG_ON_PAGE(!PageMovable(page), page);
         VM_BUG_ON_PAGE(PageIsolated(page), page);
 
-        if (test_bit(PAGE_HEADLESS, &page->private))
+        if (test_bit(PAGE_HEADLESS, &page->private) ||
+            test_bit(PAGE_CLAIMED, &page->private))
                 return false;
 
         zhdr = page_address(page);