Commit 0bb48849 authored by Domenico Cerasuolo, committed by Andrew Morton

mm: zswap: remove zswap_header

Previously, zswap_header served the purpose of storing the swpentry within
zpool pages.  This allowed zpool implementations to pass relevant
information to the writeback function.  However, with the current
implementation, writeback is directly handled within zswap.  Consequently,
there is no longer a necessity for zswap_header, as the swp_entry_t can be
stored directly in zswap_entry.

Link: https://lkml.kernel.org/r/20230612093815.133504-8-cerasuolodomenico@gmail.com
Signed-off-by: Domenico Cerasuolo <cerasuolodomenico@gmail.com>
Tested-by: Yosry Ahmed <yosryahmed@google.com>
Suggested-by: Yosry Ahmed <yosryahmed@google.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Dan Streetman <ddstreet@ieee.org>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Nhat Pham <nphamcs@gmail.com>
Cc: Sergey Senozhatsky <senozhatsky@chromium.org>
Cc: Seth Jennings <sjenning@redhat.com>
Cc: Vitaly Wool <vitaly.wool@konsulko.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent ff9d5ba2
...@@ -197,7 +197,7 @@ struct zswap_pool { ...@@ -197,7 +197,7 @@ struct zswap_pool {
*/ */
struct zswap_entry { struct zswap_entry {
struct rb_node rbnode; struct rb_node rbnode;
pgoff_t offset; swp_entry_t swpentry;
int refcount; int refcount;
unsigned int length; unsigned int length;
struct zswap_pool *pool; struct zswap_pool *pool;
...@@ -209,10 +209,6 @@ struct zswap_entry { ...@@ -209,10 +209,6 @@ struct zswap_entry {
struct list_head lru; struct list_head lru;
}; };
struct zswap_header {
swp_entry_t swpentry;
};
/* /*
* The tree lock in the zswap_tree struct protects a few things: * The tree lock in the zswap_tree struct protects a few things:
* - the rbtree * - the rbtree
...@@ -254,7 +250,7 @@ static bool zswap_has_pool; ...@@ -254,7 +250,7 @@ static bool zswap_has_pool;
pr_debug("%s pool %s/%s\n", msg, (p)->tfm_name, \ pr_debug("%s pool %s/%s\n", msg, (p)->tfm_name, \
zpool_get_type((p)->zpool)) zpool_get_type((p)->zpool))
static int zswap_writeback_entry(struct zswap_entry *entry, struct zswap_header *zhdr, static int zswap_writeback_entry(struct zswap_entry *entry,
struct zswap_tree *tree); struct zswap_tree *tree);
static int zswap_pool_get(struct zswap_pool *pool); static int zswap_pool_get(struct zswap_pool *pool);
static void zswap_pool_put(struct zswap_pool *pool); static void zswap_pool_put(struct zswap_pool *pool);
...@@ -315,12 +311,14 @@ static struct zswap_entry *zswap_rb_search(struct rb_root *root, pgoff_t offset) ...@@ -315,12 +311,14 @@ static struct zswap_entry *zswap_rb_search(struct rb_root *root, pgoff_t offset)
{ {
struct rb_node *node = root->rb_node; struct rb_node *node = root->rb_node;
struct zswap_entry *entry; struct zswap_entry *entry;
pgoff_t entry_offset;
while (node) { while (node) {
entry = rb_entry(node, struct zswap_entry, rbnode); entry = rb_entry(node, struct zswap_entry, rbnode);
if (entry->offset > offset) entry_offset = swp_offset(entry->swpentry);
if (entry_offset > offset)
node = node->rb_left; node = node->rb_left;
else if (entry->offset < offset) else if (entry_offset < offset)
node = node->rb_right; node = node->rb_right;
else else
return entry; return entry;
...@@ -337,13 +335,15 @@ static int zswap_rb_insert(struct rb_root *root, struct zswap_entry *entry, ...@@ -337,13 +335,15 @@ static int zswap_rb_insert(struct rb_root *root, struct zswap_entry *entry,
{ {
struct rb_node **link = &root->rb_node, *parent = NULL; struct rb_node **link = &root->rb_node, *parent = NULL;
struct zswap_entry *myentry; struct zswap_entry *myentry;
pgoff_t myentry_offset, entry_offset = swp_offset(entry->swpentry);
while (*link) { while (*link) {
parent = *link; parent = *link;
myentry = rb_entry(parent, struct zswap_entry, rbnode); myentry = rb_entry(parent, struct zswap_entry, rbnode);
if (myentry->offset > entry->offset) myentry_offset = swp_offset(myentry->swpentry);
if (myentry_offset > entry_offset)
link = &(*link)->rb_left; link = &(*link)->rb_left;
else if (myentry->offset < entry->offset) else if (myentry_offset < entry_offset)
link = &(*link)->rb_right; link = &(*link)->rb_right;
else { else {
*dupentry = myentry; *dupentry = myentry;
...@@ -601,7 +601,6 @@ static struct zswap_pool *zswap_pool_find_get(char *type, char *compressor) ...@@ -601,7 +601,6 @@ static struct zswap_pool *zswap_pool_find_get(char *type, char *compressor)
static int zswap_reclaim_entry(struct zswap_pool *pool) static int zswap_reclaim_entry(struct zswap_pool *pool)
{ {
struct zswap_header *zhdr;
struct zswap_entry *entry; struct zswap_entry *entry;
struct zswap_tree *tree; struct zswap_tree *tree;
pgoff_t swpoffset; pgoff_t swpoffset;
...@@ -615,15 +614,13 @@ static int zswap_reclaim_entry(struct zswap_pool *pool) ...@@ -615,15 +614,13 @@ static int zswap_reclaim_entry(struct zswap_pool *pool)
} }
entry = list_last_entry(&pool->lru, struct zswap_entry, lru); entry = list_last_entry(&pool->lru, struct zswap_entry, lru);
list_del_init(&entry->lru); list_del_init(&entry->lru);
zhdr = zpool_map_handle(pool->zpool, entry->handle, ZPOOL_MM_RO);
tree = zswap_trees[swp_type(zhdr->swpentry)];
zpool_unmap_handle(pool->zpool, entry->handle);
/* /*
* Once the lru lock is dropped, the entry might get freed. The * Once the lru lock is dropped, the entry might get freed. The
* swpoffset is copied to the stack, and entry isn't deref'd again * swpoffset is copied to the stack, and entry isn't deref'd again
* until the entry is verified to still be alive in the tree. * until the entry is verified to still be alive in the tree.
*/ */
swpoffset = swp_offset(zhdr->swpentry); swpoffset = swp_offset(entry->swpentry);
tree = zswap_trees[swp_type(entry->swpentry)];
spin_unlock(&pool->lru_lock); spin_unlock(&pool->lru_lock);
/* Check for invalidate() race */ /* Check for invalidate() race */
...@@ -636,7 +633,7 @@ static int zswap_reclaim_entry(struct zswap_pool *pool) ...@@ -636,7 +633,7 @@ static int zswap_reclaim_entry(struct zswap_pool *pool)
zswap_entry_get(entry); zswap_entry_get(entry);
spin_unlock(&tree->lock); spin_unlock(&tree->lock);
ret = zswap_writeback_entry(entry, zhdr, tree); ret = zswap_writeback_entry(entry, tree);
spin_lock(&tree->lock); spin_lock(&tree->lock);
if (ret) { if (ret) {
...@@ -1055,10 +1052,10 @@ static int zswap_get_swap_cache_page(swp_entry_t entry, ...@@ -1055,10 +1052,10 @@ static int zswap_get_swap_cache_page(swp_entry_t entry,
* the swap cache, the compressed version stored by zswap can be * the swap cache, the compressed version stored by zswap can be
* freed. * freed.
*/ */
static int zswap_writeback_entry(struct zswap_entry *entry, struct zswap_header *zhdr, static int zswap_writeback_entry(struct zswap_entry *entry,
struct zswap_tree *tree) struct zswap_tree *tree)
{ {
swp_entry_t swpentry = zhdr->swpentry; swp_entry_t swpentry = entry->swpentry;
struct page *page; struct page *page;
struct scatterlist input, output; struct scatterlist input, output;
struct crypto_acomp_ctx *acomp_ctx; struct crypto_acomp_ctx *acomp_ctx;
...@@ -1098,7 +1095,7 @@ static int zswap_writeback_entry(struct zswap_entry *entry, struct zswap_header ...@@ -1098,7 +1095,7 @@ static int zswap_writeback_entry(struct zswap_entry *entry, struct zswap_header
* writing. * writing.
*/ */
spin_lock(&tree->lock); spin_lock(&tree->lock);
if (zswap_rb_search(&tree->rbroot, entry->offset) != entry) { if (zswap_rb_search(&tree->rbroot, swp_offset(entry->swpentry)) != entry) {
spin_unlock(&tree->lock); spin_unlock(&tree->lock);
delete_from_swap_cache(page_folio(page)); delete_from_swap_cache(page_folio(page));
ret = -ENOMEM; ret = -ENOMEM;
...@@ -1110,8 +1107,7 @@ static int zswap_writeback_entry(struct zswap_entry *entry, struct zswap_header ...@@ -1110,8 +1107,7 @@ static int zswap_writeback_entry(struct zswap_entry *entry, struct zswap_header
acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx); acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);
dlen = PAGE_SIZE; dlen = PAGE_SIZE;
zhdr = zpool_map_handle(pool, entry->handle, ZPOOL_MM_RO); src = zpool_map_handle(pool, entry->handle, ZPOOL_MM_RO);
src = (u8 *)zhdr + sizeof(struct zswap_header);
if (!zpool_can_sleep_mapped(pool)) { if (!zpool_can_sleep_mapped(pool)) {
memcpy(tmp, src, entry->length); memcpy(tmp, src, entry->length);
src = tmp; src = tmp;
...@@ -1205,11 +1201,10 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset, ...@@ -1205,11 +1201,10 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset,
struct obj_cgroup *objcg = NULL; struct obj_cgroup *objcg = NULL;
struct zswap_pool *pool; struct zswap_pool *pool;
int ret; int ret;
unsigned int hlen, dlen = PAGE_SIZE; unsigned int dlen = PAGE_SIZE;
unsigned long handle, value; unsigned long handle, value;
char *buf; char *buf;
u8 *src, *dst; u8 *src, *dst;
struct zswap_header zhdr = { .swpentry = swp_entry(type, offset) };
gfp_t gfp; gfp_t gfp;
/* THP isn't supported */ /* THP isn't supported */
...@@ -1254,7 +1249,7 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset, ...@@ -1254,7 +1249,7 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset,
src = kmap_atomic(page); src = kmap_atomic(page);
if (zswap_is_page_same_filled(src, &value)) { if (zswap_is_page_same_filled(src, &value)) {
kunmap_atomic(src); kunmap_atomic(src);
entry->offset = offset; entry->swpentry = swp_entry(type, offset);
entry->length = 0; entry->length = 0;
entry->value = value; entry->value = value;
atomic_inc(&zswap_same_filled_pages); atomic_inc(&zswap_same_filled_pages);
...@@ -1308,11 +1303,10 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset, ...@@ -1308,11 +1303,10 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset,
} }
/* store */ /* store */
hlen = sizeof(zhdr);
gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM; gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
if (zpool_malloc_support_movable(entry->pool->zpool)) if (zpool_malloc_support_movable(entry->pool->zpool))
gfp |= __GFP_HIGHMEM | __GFP_MOVABLE; gfp |= __GFP_HIGHMEM | __GFP_MOVABLE;
ret = zpool_malloc(entry->pool->zpool, hlen + dlen, gfp, &handle); ret = zpool_malloc(entry->pool->zpool, dlen, gfp, &handle);
if (ret == -ENOSPC) { if (ret == -ENOSPC) {
zswap_reject_compress_poor++; zswap_reject_compress_poor++;
goto put_dstmem; goto put_dstmem;
...@@ -1322,13 +1316,12 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset, ...@@ -1322,13 +1316,12 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset,
goto put_dstmem; goto put_dstmem;
} }
buf = zpool_map_handle(entry->pool->zpool, handle, ZPOOL_MM_WO); buf = zpool_map_handle(entry->pool->zpool, handle, ZPOOL_MM_WO);
memcpy(buf, &zhdr, hlen); memcpy(buf, dst, dlen);
memcpy(buf + hlen, dst, dlen);
zpool_unmap_handle(entry->pool->zpool, handle); zpool_unmap_handle(entry->pool->zpool, handle);
mutex_unlock(acomp_ctx->mutex); mutex_unlock(acomp_ctx->mutex);
/* populate entry */ /* populate entry */
entry->offset = offset; entry->swpentry = swp_entry(type, offset);
entry->handle = handle; entry->handle = handle;
entry->length = dlen; entry->length = dlen;
...@@ -1437,7 +1430,6 @@ static int zswap_frontswap_load(unsigned type, pgoff_t offset, ...@@ -1437,7 +1430,6 @@ static int zswap_frontswap_load(unsigned type, pgoff_t offset,
/* decompress */ /* decompress */
dlen = PAGE_SIZE; dlen = PAGE_SIZE;
src = zpool_map_handle(entry->pool->zpool, entry->handle, ZPOOL_MM_RO); src = zpool_map_handle(entry->pool->zpool, entry->handle, ZPOOL_MM_RO);
src += sizeof(struct zswap_header);
if (!zpool_can_sleep_mapped(entry->pool->zpool)) { if (!zpool_can_sleep_mapped(entry->pool->zpool)) {
memcpy(tmp, src, entry->length); memcpy(tmp, src, entry->length);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment