Commit f999f38b authored by Domenico Cerasuolo, committed by Andrew Morton

mm: zswap: add pool shrinking mechanism

Patch series "mm: zswap: move writeback LRU from zpool to zswap", v3.

This series aims to improve the zswap reclaim mechanism by reorganizing
the LRU management. In the current implementation, the LRU is maintained
within each zpool driver, resulting in duplicated code across the three
drivers. The proposed change moves the LRU management from the
individual zpool implementations up to the zswap layer.

The primary objective of this refactoring effort is to simplify the
codebase. By unifying the reclaim loop and consolidating LRU handling
within zswap, we can eliminate redundant code and improve
maintainability. Additionally, this change enables the reclamation of
stored pages in their actual LRU order. Presently, the zpool drivers
link backing pages in an LRU, causing compressed pages with different
LRU positions to be written back simultaneously.

The series consists of several patches. The first patch implements the
LRU and the reclaim loop in zswap, but they are not used yet because
all three zpool drivers are still marked as zpool_evictable. The
following three commits modify each zpool driver so that it is no
longer zpool_evictable, allowing the reclaim loop in zswap to take
over. Once the drivers have dropped their shrink functions, the zpool
interface is trimmed by removing zpool_evictable, zpool_ops, and
zpool_shrink. Finally, the code in zswap is further cleaned up by
simplifying the writeback function and removing the now unnecessary
zswap_header.


This patch (of 7):

Each zpool driver (zbud, z3fold and zsmalloc) implements its own shrink
function, which is called from zpool_shrink.  However, with this commit, a
unified shrink function is added to zswap.  The ultimate goal is to
eliminate the need for zpool_shrink once all zpool implementations have
dropped their shrink code.

To keep each commit functional on its own, this change focuses solely on
adding the mechanism itself.  No modifications are made to the backends,
so there is no immediate functional change.  The zswap mechanism will
only come into effect once the backends have removed their shrink code.
The subsequent commits will address the modifications needed in the
backends.
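
[Editorial note] The new per-pool LRU comes with a locking rule, spelled out in a comment the diff below adds above struct zswap_pool: zswap_tree.lock is taken before zswap_pool.lru_lock, and the only exception is the writeback path, which pops an entry off the LRU first and must then re-check that the entry is still present in the tree. The fragment below is a simplified sketch of those two orderings, assembled from the hunks in the diff for orientation only; identifiers are the ones used in the diff, and surrounding code, declarations, and error handling are elided.

	/* e.g. on store: tree->lock is already held, lru_lock nests inside it */
	spin_lock(&tree->lock);
	spin_lock(&entry->pool->lru_lock);
	list_add(&entry->lru, &entry->pool->lru);	/* entry becomes most recently used */
	spin_unlock(&entry->pool->lru_lock);
	spin_unlock(&tree->lock);

	/* writeback: the LRU tail is taken without holding tree->lock ... */
	spin_lock(&pool->lru_lock);
	entry = list_last_entry(&pool->lru, struct zswap_entry, lru);
	list_del_init(&entry->lru);
	spin_unlock(&pool->lru_lock);

	/* ... so the entry must be re-validated against the tree before use */
	spin_lock(&tree->lock);
	if (entry != zswap_rb_search(&tree->rbroot, swpoffset)) {
		/* raced with invalidate(); the caller retries with -EAGAIN */
		spin_unlock(&tree->lock);
		return -EAGAIN;
	}
	zswap_entry_get(entry);	/* pin the entry for the duration of writeback */
	spin_unlock(&tree->lock);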

Link: https://lkml.kernel.org/r/20230612093815.133504-1-cerasuolodomenico@gmail.com
Link: https://lkml.kernel.org/r/20230612093815.133504-2-cerasuolodomenico@gmail.com
Signed-off-by: Domenico Cerasuolo <cerasuolodomenico@gmail.com>
Acked-by: Nhat Pham <nphamcs@gmail.com>
Tested-by: Yosry Ahmed <yosryahmed@google.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Reviewed-by: Yosry Ahmed <yosryahmed@google.com>
Reviewed-by: Sergey Senozhatsky <senozhatsky@chromium.org>
Cc: Dan Streetman <ddstreet@ieee.org>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Seth Jennings <sjenning@redhat.com>
Cc: Vitaly Wool <vitaly.wool@konsulko.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 0183d777
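
[Editorial note] At this stage the only consumer of the new code is shrink_worker(): as long as a backend is still marked zpool_evictable it keeps using the legacy zpool_shrink() path, and it only falls back to the new zswap_reclaim_entry() once a backend has dropped its shrink code. The lines below are a condensed view of that dispatch, taken from the shrink_worker() hunk in the diff; the loop condition and the rest of the failure handling are elided.

	/* inside shrink_worker()'s reclaim loop */
	if (zpool_evictable(pool->zpool))
		ret = zpool_shrink(pool->zpool, 1, NULL);	/* legacy path: the driver's internal LRU */
	else
		ret = zswap_reclaim_entry(pool);		/* new path: zswap's pool-level LRU */
	if (ret)
		zswap_reject_reclaim_fail++;			/* failed attempts are counted */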
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -154,6 +154,12 @@ struct crypto_acomp_ctx {
 	struct mutex *mutex;
 };
 
+/*
+ * The lock ordering is zswap_tree.lock -> zswap_pool.lru_lock.
+ * The only case where lru_lock is not acquired while holding tree.lock is
+ * when a zswap_entry is taken off the lru for writeback, in that case it
+ * needs to be verified that it's still valid in the tree.
+ */
 struct zswap_pool {
 	struct zpool *zpool;
 	struct crypto_acomp_ctx __percpu *acomp_ctx;
@@ -163,6 +169,8 @@ struct zswap_pool {
 	struct work_struct shrink_work;
 	struct hlist_node node;
 	char tfm_name[CRYPTO_MAX_ALG_NAME];
+	struct list_head lru;
+	spinlock_t lru_lock;
 };
 
 /*
@@ -180,10 +188,12 @@ struct zswap_pool {
  *            be held while changing the refcount.  Since the lock must
  *            be held, there is no reason to also make refcount atomic.
  * length - the length in bytes of the compressed page data.  Needed during
- *          decompression. For a same value filled page length is 0.
+ *          decompression. For a same value filled page length is 0, and both
+ *          pool and lru are invalid and must be ignored.
  * pool - the zswap_pool the entry's data is in
  * handle - zpool allocation handle that stores the compressed page data
  * value - value of the same-value filled pages which have same content
+ * lru - handle to the pool's lru used to evict pages.
  */
 struct zswap_entry {
 	struct rb_node rbnode;
@@ -196,6 +206,7 @@ struct zswap_entry {
 		unsigned long value;
 	};
 	struct obj_cgroup *objcg;
+	struct list_head lru;
 };
 
 struct zswap_header {
@@ -368,6 +379,12 @@ static void zswap_free_entry(struct zswap_entry *entry)
 	if (!entry->length)
 		atomic_dec(&zswap_same_filled_pages);
 	else {
+		/* zpool_evictable will be removed once all 3 backends have migrated */
+		if (!zpool_evictable(entry->pool->zpool)) {
+			spin_lock(&entry->pool->lru_lock);
+			list_del(&entry->lru);
+			spin_unlock(&entry->pool->lru_lock);
+		}
 		zpool_free(entry->pool->zpool, entry->handle);
 		zswap_pool_put(entry->pool);
 	}
@@ -588,14 +605,72 @@ static struct zswap_pool *zswap_pool_find_get(char *type, char *compressor)
 	return NULL;
 }
 
+static int zswap_reclaim_entry(struct zswap_pool *pool)
+{
+	struct zswap_header *zhdr;
+	struct zswap_entry *entry;
+	struct zswap_tree *tree;
+	pgoff_t swpoffset;
+	int ret;
+
+	/* Get an entry off the LRU */
+	spin_lock(&pool->lru_lock);
+	if (list_empty(&pool->lru)) {
+		spin_unlock(&pool->lru_lock);
+		return -EINVAL;
+	}
+	entry = list_last_entry(&pool->lru, struct zswap_entry, lru);
+	list_del_init(&entry->lru);
+	zhdr = zpool_map_handle(pool->zpool, entry->handle, ZPOOL_MM_RO);
+	tree = zswap_trees[swp_type(zhdr->swpentry)];
+	zpool_unmap_handle(pool->zpool, entry->handle);
+	/*
+	 * Once the lru lock is dropped, the entry might get freed. The
+	 * swpoffset is copied to the stack, and entry isn't deref'd again
+	 * until the entry is verified to still be alive in the tree.
+	 */
+	swpoffset = swp_offset(zhdr->swpentry);
+	spin_unlock(&pool->lru_lock);
+
+	/* Check for invalidate() race */
+	spin_lock(&tree->lock);
+	if (entry != zswap_rb_search(&tree->rbroot, swpoffset)) {
+		ret = -EAGAIN;
+		goto unlock;
+	}
+	/* Hold a reference to prevent a free during writeback */
+	zswap_entry_get(entry);
+	spin_unlock(&tree->lock);
+
+	ret = zswap_writeback_entry(pool->zpool, entry->handle);
+
+	spin_lock(&tree->lock);
+	if (ret) {
+		/* Writeback failed, put entry back on LRU */
+		spin_lock(&pool->lru_lock);
+		list_move(&entry->lru, &pool->lru);
+		spin_unlock(&pool->lru_lock);
+	}
+
+	/* Drop local reference */
+	zswap_entry_put(tree, entry);
+unlock:
+	spin_unlock(&tree->lock);
+	return ret ? -EAGAIN : 0;
+}
+
 static void shrink_worker(struct work_struct *w)
 {
 	struct zswap_pool *pool = container_of(w, typeof(*pool),
 						shrink_work);
 	int ret, failures = 0;
 
+	/* zpool_evictable will be removed once all 3 backends have migrated */
 	do {
-		ret = zpool_shrink(pool->zpool, 1, NULL);
+		if (zpool_evictable(pool->zpool))
+			ret = zpool_shrink(pool->zpool, 1, NULL);
+		else
+			ret = zswap_reclaim_entry(pool);
 		if (ret) {
 			zswap_reject_reclaim_fail++;
 			if (ret != -EAGAIN)
@@ -659,6 +734,8 @@ static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
 	 */
 	kref_init(&pool->kref);
 	INIT_LIST_HEAD(&pool->list);
+	INIT_LIST_HEAD(&pool->lru);
+	spin_lock_init(&pool->lru_lock);
 	INIT_WORK(&pool->shrink_work, shrink_worker);
 
 	zswap_pool_debug("created", pool);
@@ -1274,7 +1351,7 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset,
 	}
 
 	/* store */
-	hlen = zpool_evictable(entry->pool->zpool) ? sizeof(zhdr) : 0;
+	hlen = sizeof(zhdr);
 	gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
 	if (zpool_malloc_support_movable(entry->pool->zpool))
 		gfp |= __GFP_HIGHMEM | __GFP_MOVABLE;
@@ -1317,6 +1394,12 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset,
 			zswap_entry_put(tree, dupentry);
 		}
 	} while (ret == -EEXIST);
+	/* zpool_evictable will be removed once all 3 backends have migrated */
+	if (entry->length && !zpool_evictable(entry->pool->zpool)) {
+		spin_lock(&entry->pool->lru_lock);
+		list_add(&entry->lru, &entry->pool->lru);
+		spin_unlock(&entry->pool->lru_lock);
+	}
 	spin_unlock(&tree->lock);
 
 	/* update stats */
@@ -1398,8 +1481,7 @@ static int zswap_frontswap_load(unsigned type, pgoff_t offset,
 	/* decompress */
 	dlen = PAGE_SIZE;
 	src = zpool_map_handle(entry->pool->zpool, entry->handle, ZPOOL_MM_RO);
-	if (zpool_evictable(entry->pool->zpool))
-		src += sizeof(struct zswap_header);
+	src += sizeof(struct zswap_header);
 
 	if (!zpool_can_sleep_mapped(entry->pool->zpool)) {
 		memcpy(tmp, src, entry->length);
@@ -1432,6 +1514,11 @@ static int zswap_frontswap_load(unsigned type, pgoff_t offset,
 	if (!ret && zswap_exclusive_loads_enabled) {
 		zswap_invalidate_entry(tree, entry);
 		*exclusive = true;
+	} else if (entry->length && !zpool_evictable(entry->pool->zpool)) {
+		/* zpool_evictable will be removed once all 3 backends have migrated */
+		spin_lock(&entry->pool->lru_lock);
+		list_move(&entry->lru, &entry->pool->lru);
+		spin_unlock(&entry->pool->lru_lock);
 	}
 	spin_unlock(&tree->lock);
 