Commit 6b3379e8 authored by Johannes Weiner, committed by Andrew Morton

zswap: fix writeback lock ordering for zsmalloc

Patch series "Implement writeback for zsmalloc", v7.

Unlike other zswap allocators such as zbud or z3fold, zsmalloc currently
lacks a writeback mechanism.  This means that when the zswap pool is
full, it simply rejects further allocations, and the pages are written
directly to swap.

This series of patches implements writeback for zsmalloc. When the zswap
pool becomes full, zsmalloc will attempt to evict all the compressed
objects in the least-recently used zspages.


This patch (of 6):

zswap's customary lock order is tree->lock before pool->lock, because the
tree->lock protects the entries' refcount, and the free callbacks in the
backends acquire their respective pool locks to dispatch the backing
object.  zsmalloc's map callback takes the pool lock, so zswap must not
grab the tree->lock while a handle is mapped.  This currently only happens
during writeback, which isn't implemented for zsmalloc.  In preparation
for it, move the tree->lock section out of the mapped entry section.
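
The ordering problem can be sketched as follows.  This is a minimal
illustration, not the diff below: it assumes the local variables and
helpers of zswap_writeback_entry() (e.g. zswap_entry_find_get()) and
elides error handling and the decompression path.

	/*
	 * Old ordering: the tree->lock section runs while the handle is
	 * mapped, i.e. while the backend's map callback may hold its
	 * pool lock (as zsmalloc's does).  Acquiring tree->lock under
	 * the pool lock inverts the customary order.
	 */
	zhdr = zpool_map_handle(pool, handle, ZPOOL_MM_RO);
	swpentry = zhdr->swpentry;
	tree = zswap_trees[swp_type(swpentry)];
	offset = swp_offset(swpentry);

	spin_lock(&tree->lock);		/* taken with the pool lock held */
	entry = zswap_entry_find_get(&tree->rbroot, offset);
	spin_unlock(&tree->lock);

	zpool_unmap_handle(pool, handle);

	/*
	 * New ordering: copy the swap entry out of the mapped object,
	 * unmap the handle, and only then take tree->lock.
	 */
	zhdr = zpool_map_handle(pool, handle, ZPOOL_MM_RO);
	swpentry = zhdr->swpentry;
	tree = zswap_trees[swp_type(swpentry)];
	offset = swp_offset(swpentry);
	zpool_unmap_handle(pool, handle);

	spin_lock(&tree->lock);		/* taken with no pool lock held */
	entry = zswap_entry_find_get(&tree->rbroot, offset);
	spin_unlock(&tree->lock);

The compressed data then has to be mapped again later, in the
decompression path, which is what the second half of the diff below
does.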

Link: https://lkml.kernel.org/r/20221128191616.1261026-1-nphamcs@gmail.com
Link: https://lkml.kernel.org/r/20221128191616.1261026-2-nphamcs@gmail.com
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Nhat Pham <nphamcs@gmail.com>
Reviewed-by: Sergey Senozhatsky <senozhatsky@chromium.org>
Cc: Dan Streetman <ddstreet@ieee.org>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Nitin Gupta <ngupta@vflare.org>
Cc: Seth Jennings <sjenning@redhat.com>
Cc: Vitaly Wool <vitaly.wool@konsulko.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent fd3b1bc3
@@ -968,6 +968,7 @@ static int zswap_writeback_entry(struct zpool *pool, unsigned long handle)
 	swpentry = zhdr->swpentry; /* here */
 	tree = zswap_trees[swp_type(swpentry)];
 	offset = swp_offset(swpentry);
+	zpool_unmap_handle(pool, handle);
 
 	/* find and ref zswap entry */
 	spin_lock(&tree->lock);
@@ -975,20 +976,12 @@ static int zswap_writeback_entry(struct zpool *pool, unsigned long handle)
 	if (!entry) {
 		/* entry was invalidated */
 		spin_unlock(&tree->lock);
-		zpool_unmap_handle(pool, handle);
 		kfree(tmp);
 		return 0;
 	}
 	spin_unlock(&tree->lock);
 	BUG_ON(offset != entry->offset);
 
-	src = (u8 *)zhdr + sizeof(struct zswap_header);
-	if (!zpool_can_sleep_mapped(pool)) {
-		memcpy(tmp, src, entry->length);
-		src = tmp;
-		zpool_unmap_handle(pool, handle);
-	}
-
 	/* try to allocate swap cache page */
 	switch (zswap_get_swap_cache_page(swpentry, &page)) {
 	case ZSWAP_SWAPCACHE_FAIL: /* no memory or invalidate happened */
@@ -1006,6 +999,14 @@ static int zswap_writeback_entry(struct zpool *pool, unsigned long handle)
 		acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);
 		dlen = PAGE_SIZE;
 
+		zhdr = zpool_map_handle(pool, handle, ZPOOL_MM_RO);
+		src = (u8 *)zhdr + sizeof(struct zswap_header);
+		if (!zpool_can_sleep_mapped(pool)) {
+			memcpy(tmp, src, entry->length);
+			src = tmp;
+			zpool_unmap_handle(pool, handle);
+		}
+
 		mutex_lock(acomp_ctx->mutex);
 		sg_init_one(&input, src, entry->length);
 		sg_init_table(&output, 1);
@@ -1015,6 +1016,11 @@ static int zswap_writeback_entry(struct zpool *pool, unsigned long handle)
 		dlen = acomp_ctx->req->dlen;
 		mutex_unlock(acomp_ctx->mutex);
 
+		if (!zpool_can_sleep_mapped(pool))
+			kfree(tmp);
+		else
+			zpool_unmap_handle(pool, handle);
+
 		BUG_ON(ret);
 		BUG_ON(dlen != PAGE_SIZE);
 
@@ -1045,7 +1051,11 @@ static int zswap_writeback_entry(struct zpool *pool, unsigned long handle)
 	zswap_entry_put(tree, entry);
 	spin_unlock(&tree->lock);
 
-	goto end;
+	return ret;
+
+fail:
+	if (!zpool_can_sleep_mapped(pool))
+		kfree(tmp);
 
 	/*
 	* if we get here due to ZSWAP_SWAPCACHE_EXIST
@@ -1054,17 +1064,10 @@ static int zswap_writeback_entry(struct zpool *pool, unsigned long handle)
 	* if we free the entry in the following put
 	* it is also okay to return !0
 	*/
-fail:
 	spin_lock(&tree->lock);
 	zswap_entry_put(tree, entry);
 	spin_unlock(&tree->lock);
 
-end:
-	if (zpool_can_sleep_mapped(pool))
-		zpool_unmap_handle(pool, handle);
-	else
-		kfree(tmp);
-
 	return ret;
 }