Commit d07a46a4 authored by Chris Li, committed by Andrew Morton

mm: swap: mTHP allocate swap entries from nonfull list

Track the nonfull clusters as well as the empty clusters on lists.  Each
order has its own nonfull cluster list.

When a new cluster is allocated, it remembers which order it is being
used for.

When a cluster has a free entry, it is added to the nonfull[order] list.
When the free cluster list is empty, allocation also falls back to the
nonfull list of that order.

This improves the mTHP swap allocation success rate.
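
For illustration only, here is a minimal userspace sketch of that fallback
order.  This is not the kernel code: struct cluster, struct swap_device,
pick_cluster() and the SWAP_NR_ORDERS value below are simplified,
hypothetical stand-ins for the structures in the diff.

#include <stddef.h>

#define SWAP_NR_ORDERS 5	/* stand-in value; not the kernel's definition */

struct cluster {
	struct cluster *next;		/* simplified stand-in for list_head */
	unsigned char order;		/* order this cluster last served */
	unsigned short count;		/* allocated slots; 0 == fully free */
};

struct swap_device {
	struct cluster *free_clusters;	/* fully empty clusters */
	struct cluster *nonfull_clusters[SWAP_NR_ORDERS]; /* partly free, one list per order */
};

/*
 * Same fallback order as scan_swap_map_try_ssd_cluster() in the diff:
 * try a fully free cluster first, then a nonfull cluster that was
 * previously used for the same order.
 */
static struct cluster *pick_cluster(struct swap_device *si, int order)
{
	struct cluster *ci = si->free_clusters;

	if (ci) {
		si->free_clusters = ci->next;
		ci->order = (unsigned char)order; /* remember the new order */
		return ci;
	}

	ci = si->nonfull_clusters[order];
	if (ci) {
		si->nonfull_clusters[order] = ci->next;
		return ci;
	}

	return NULL;	/* caller falls back to the discard list / full scan */
}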

There are limitations if the distribution of the different mTHP orders
changes a lot over time, e.g. many nonfull clusters are assigned to
order A, and later there are many order B allocations but very few
order A allocations.  Currently a cluster used by order A will not be
reused by order B unless the cluster becomes 100% empty.
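
Continuing the toy sketch above (same hypothetical types), that
constraint can be made concrete: a partly free cluster parked on order
4's nonfull list is invisible to an order-2 request until its count
drops back to zero.

#include <assert.h>

int main(void)
{
	struct swap_device si = { 0 };
	struct cluster c = { .next = NULL, .order = 4, .count = 3 };

	/* A cluster once allocated for order 4, now partly free. */
	si.nonfull_clusters[4] = &c;

	/*
	 * An order-2 request consults only free_clusters and
	 * nonfull_clusters[2]; the order-4 cluster stays invisible.
	 */
	assert(pick_cluster(&si, 2) == NULL);

	/*
	 * Only when its count drops to 0 is the cluster moved back to
	 * free_clusters, where any order can claim it again.
	 */
	return 0;
}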

Link: https://lkml.kernel.org/r/20240730-swap-allocator-v5-2-cb9c148b9297@kernel.org
Signed-off-by: Chris Li <chrisl@kernel.org>
Reported-by: Barry Song <21cnbao@gmail.com>
Cc: "Huang, Ying" <ying.huang@intel.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Kairui Song <kasong@tencent.com>
Cc: Kalesh Singh <kaleshsingh@google.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 73ed0baa
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -254,9 +254,11 @@ struct swap_cluster_info {
 	 */
 	u16 count;
 	u8 flags;
+	u8 order;
 	struct list_head list;
 };
 #define CLUSTER_FLAG_FREE 1 /* This cluster is free */
+#define CLUSTER_FLAG_NONFULL 2 /* This cluster is on nonfull list */
 
 /*
  * The first page in the swap file is the swap header, which is always marked
@@ -294,6 +296,8 @@ struct swap_info_struct {
 	unsigned char *swap_map;	/* vmalloc'ed array of usage counts */
 	struct swap_cluster_info *cluster_info; /* cluster info. Only for SSD */
 	struct list_head free_clusters; /* free clusters list */
+	struct list_head nonfull_clusters[SWAP_NR_ORDERS];
+					/* list of cluster that contains at least one free slot */
 	unsigned int lowest_bit;	/* index of first free in swap_map */
 	unsigned int highest_bit;	/* index of last free in swap_map */
 	unsigned int pages;		/* total of usable pages of swap */
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -361,14 +361,22 @@ static void swap_cluster_schedule_discard(struct swap_info_struct *si,
 	memset(si->swap_map + idx * SWAPFILE_CLUSTER,
 			SWAP_MAP_BAD, SWAPFILE_CLUSTER);
 
-	list_add_tail(&ci->list, &si->discard_clusters);
+	VM_BUG_ON(ci->flags & CLUSTER_FLAG_FREE);
+	if (ci->flags & CLUSTER_FLAG_NONFULL)
+		list_move_tail(&ci->list, &si->discard_clusters);
+	else
+		list_add_tail(&ci->list, &si->discard_clusters);
+	ci->flags = 0;
 	schedule_work(&si->discard_work);
 }
 
 static void __free_cluster(struct swap_info_struct *si, struct swap_cluster_info *ci)
 {
-	ci->flags = CLUSTER_FLAG_FREE;
-	list_add_tail(&ci->list, &si->free_clusters);
+	if (ci->flags & CLUSTER_FLAG_NONFULL)
+		list_move_tail(&ci->list, &si->free_clusters);
+	else
+		list_add_tail(&ci->list, &si->free_clusters);
+	ci->flags = CLUSTER_FLAG_FREE;
 }
 
 /*
@@ -491,8 +499,15 @@ static void dec_cluster_info_page(struct swap_info_struct *p, struct swap_cluste
 	VM_BUG_ON(ci->count == 0);
 	ci->count--;
 
-	if (!ci->count)
+	if (!ci->count) {
 		free_cluster(p, ci);
+		return;
+	}
+
+	if (!(ci->flags & CLUSTER_FLAG_NONFULL)) {
+		list_add_tail(&ci->list, &p->nonfull_clusters[ci->order]);
+		ci->flags |= CLUSTER_FLAG_NONFULL;
+	}
 }
 
 /*
@@ -553,6 +568,19 @@ static bool scan_swap_map_try_ssd_cluster(struct swap_info_struct *si,
 	if (tmp == SWAP_NEXT_INVALID) {
 		if (!list_empty(&si->free_clusters)) {
 			ci = list_first_entry(&si->free_clusters, struct swap_cluster_info, list);
+			list_del(&ci->list);
+			spin_lock(&ci->lock);
+			ci->order = order;
+			ci->flags = 0;
+			spin_unlock(&ci->lock);
+			tmp = cluster_index(si, ci) * SWAPFILE_CLUSTER;
+		} else if (!list_empty(&si->nonfull_clusters[order])) {
+			ci = list_first_entry(&si->nonfull_clusters[order],
+					      struct swap_cluster_info, list);
+			list_del(&ci->list);
+			spin_lock(&ci->lock);
+			ci->flags = 0;
+			spin_unlock(&ci->lock);
 			tmp = cluster_index(si, ci) * SWAPFILE_CLUSTER;
 		} else if (!list_empty(&si->discard_clusters)) {
 			/*
@@ -959,6 +987,7 @@ static void swap_free_cluster(struct swap_info_struct *si, unsigned long idx)
 	ci = lock_cluster(si, offset);
 	memset(si->swap_map + offset, 0, SWAPFILE_CLUSTER);
 	ci->count = 0;
+	ci->order = 0;
 	ci->flags = 0;
 	free_cluster(si, ci);
 	unlock_cluster(ci);
@@ -2911,6 +2940,9 @@ static int setup_swap_map_and_extents(struct swap_info_struct *p,
 	INIT_LIST_HEAD(&p->free_clusters);
 	INIT_LIST_HEAD(&p->discard_clusters);
 
+	for (i = 0; i < SWAP_NR_ORDERS; i++)
+		INIT_LIST_HEAD(&p->nonfull_clusters[i]);
+
 	for (i = 0; i < swap_header->info.nr_badpages; i++) {
 		unsigned int page_nr = swap_header->info.badpages[i];
 		if (page_nr == 0 || page_nr > swap_header->info.last_page)