Commit 20a5532f authored by Usama Arif, committed by Andrew Morton

mm: remove code to handle same filled pages

With an earlier commit to handle zero-filled pages in swap directly, and
with only 1% of the same-filled pages being non-zero, zswap no longer
needs to handle same-filled pages and can just work on compressed pages.
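
The practical effect on the swap-in path is that zswap_load() loses its
special case for refilling same-filled pages. A condensed before/after
sketch of that path, taken from the diff below (illustrative, not a
literal patch excerpt):

    /* before: length == 0 marked a same-filled entry, refilled via memset_l() */
    if (entry->length)
            zswap_decompress(entry, folio);
    else
            zswap_fill_folio(folio, entry->value);

    /* after: every stored entry is compressed, so load always decompresses */
    zswap_decompress(entry, folio);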

Link: https://lkml.kernel.org/r/20240823190545.979059-3-usamaarif642@gmail.com
Signed-off-by: Usama Arif <usamaarif642@gmail.com>
Reviewed-by: Chengming Zhou <chengming.zhou@linux.dev>
Acked-by: Yosry Ahmed <yosryahmed@google.com>
Reviewed-by: Nhat Pham <nphamcs@gmail.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Shakeel Butt <shakeel.butt@linux.dev>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 0ca0c24e
@@ -44,8 +44,6 @@
 **********************************/
 /* The number of compressed pages currently stored in zswap */
 atomic_t zswap_stored_pages = ATOMIC_INIT(0);
-/* The number of same-value filled pages currently stored in zswap */
-static atomic_t zswap_same_filled_pages = ATOMIC_INIT(0);
 
 /*
  * The statistics below are not protected from concurrent access for
@@ -185,8 +183,7 @@ static struct shrinker *zswap_shrinker;
  *
  * swpentry - associated swap entry, the offset indexes into the red-black tree
  * length - the length in bytes of the compressed page data. Needed during
- *          decompression. For a same value filled page length is 0, and both
- *          pool and lru are invalid and must be ignored.
+ *          decompression.
  * referenced - true if the entry recently entered the zswap pool. Unset by the
  *              writeback logic. The entry is only reclaimed by the writeback
  *              logic if referenced is unset. See comments in the shrinker
@@ -202,10 +199,7 @@ struct zswap_entry {
         unsigned int length;
         bool referenced;
         struct zswap_pool *pool;
-        union {
-                unsigned long handle;
-                unsigned long value;
-        };
+        unsigned long handle;
         struct obj_cgroup *objcg;
         struct list_head lru;
 };
@@ -801,13 +795,9 @@ static void zswap_entry_cache_free(struct zswap_entry *entry)
  */
 static void zswap_entry_free(struct zswap_entry *entry)
 {
-        if (!entry->length)
-                atomic_dec(&zswap_same_filled_pages);
-        else {
-                zswap_lru_del(&zswap_list_lru, entry);
-                zpool_free(entry->pool->zpool, entry->handle);
-                zswap_pool_put(entry->pool);
-        }
+        zswap_lru_del(&zswap_list_lru, entry);
+        zpool_free(entry->pool->zpool, entry->handle);
+        zswap_pool_put(entry->pool);
         if (entry->objcg) {
                 obj_cgroup_uncharge_zswap(entry->objcg, entry->length);
                 obj_cgroup_put(entry->objcg);
@@ -1277,11 +1267,6 @@ static unsigned long zswap_shrinker_count(struct shrinker *shrinker,
          * This ensures that the better zswap compresses memory, the fewer
          * pages we will evict to swap (as it will otherwise incur IO for
          * relatively small memory saving).
-         *
-         * The memory saving factor calculated here takes same-filled pages into
-         * account, but those are not freeable since they almost occupy no
-         * space. Hence, we may scale nr_freeable down a little bit more than we
-         * should if we have a lot of same-filled pages.
          */
         return mult_frac(nr_freeable, nr_backing, nr_stored);
 }
@@ -1416,42 +1401,6 @@ static void shrink_worker(struct work_struct *w)
         } while (zswap_total_pages() > thr);
 }
 
-/*********************************
-* same-filled functions
-**********************************/
-static bool zswap_is_folio_same_filled(struct folio *folio, unsigned long *value)
-{
-        unsigned long *data;
-        unsigned long val;
-        unsigned int pos, last_pos = PAGE_SIZE / sizeof(*data) - 1;
-        bool ret = false;
-
-        data = kmap_local_folio(folio, 0);
-        val = data[0];
-
-        if (val != data[last_pos])
-                goto out;
-
-        for (pos = 1; pos < last_pos; pos++) {
-                if (val != data[pos])
-                        goto out;
-        }
-
-        *value = val;
-        ret = true;
-out:
-        kunmap_local(data);
-        return ret;
-}
-
-static void zswap_fill_folio(struct folio *folio, unsigned long value)
-{
-        unsigned long *data = kmap_local_folio(folio, 0);
-
-        memset_l(data, value, PAGE_SIZE / sizeof(unsigned long));
-        kunmap_local(data);
-}
-
 /*********************************
 * main API
 **********************************/
@@ -1463,7 +1412,6 @@ bool zswap_store(struct folio *folio)
         struct zswap_entry *entry, *old;
         struct obj_cgroup *objcg = NULL;
         struct mem_cgroup *memcg = NULL;
-        unsigned long value;
 
         VM_WARN_ON_ONCE(!folio_test_locked(folio));
         VM_WARN_ON_ONCE(!folio_test_swapcache(folio));
@@ -1496,13 +1444,6 @@ bool zswap_store(struct folio *folio)
                 goto reject;
         }
 
-        if (zswap_is_folio_same_filled(folio, &value)) {
-                entry->length = 0;
-                entry->value = value;
-                atomic_inc(&zswap_same_filled_pages);
-                goto store_entry;
-        }
-
         /* if entry is successfully added, it keeps the reference */
         entry->pool = zswap_pool_current_get();
         if (!entry->pool)
@@ -1520,7 +1461,6 @@ bool zswap_store(struct folio *folio)
         if (!zswap_compress(folio, entry))
                 goto put_pool;
 
-store_entry:
         entry->swpentry = swp;
         entry->objcg = objcg;
         entry->referenced = true;
@@ -1569,13 +1509,9 @@ bool zswap_store(struct folio *folio)
         return true;
 
 store_failed:
-        if (!entry->length)
-                atomic_dec(&zswap_same_filled_pages);
-        else {
-                zpool_free(entry->pool->zpool, entry->handle);
+        zpool_free(entry->pool->zpool, entry->handle);
 put_pool:
-                zswap_pool_put(entry->pool);
-        }
+        zswap_pool_put(entry->pool);
 freepage:
         zswap_entry_cache_free(entry);
 reject:
@@ -1638,10 +1574,7 @@ bool zswap_load(struct folio *folio)
         if (!entry)
                 return false;
 
-        if (entry->length)
-                zswap_decompress(entry, folio);
-        else
-                zswap_fill_folio(folio, entry->value);
+        zswap_decompress(entry, folio);
 
         count_vm_event(ZSWPIN);
         if (entry->objcg)
@@ -1744,8 +1677,6 @@ static int zswap_debugfs_init(void)
                             zswap_debugfs_root, NULL, &total_size_fops);
         debugfs_create_atomic_t("stored_pages", 0444,
                                 zswap_debugfs_root, &zswap_stored_pages);
-        debugfs_create_atomic_t("same_filled_pages", 0444,
-                                zswap_debugfs_root, &zswap_same_filled_pages);
 
         return 0;
 }
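
Note on the shrinker formula retained above: zswap_shrinker_count() scales
the reclaimable LRU size by the pool's compression ratio,
nr_freeable * nr_backing / nr_stored. With made-up numbers, if 1000 pages
are stored (nr_stored) but occupy only 250 pages of backing memory
(nr_backing), then for nr_freeable = 800 the shrinker reports
mult_frac(800, 250, 1000) = 200 freeable pages, i.e. the better zswap
compresses, the fewer pages it offers for writeback to swap. The caveat
removed in this patch no longer applies because same-filled entries, which
occupied almost no backing space, are no longer stored by zswap at all.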