Commit 325efb16 authored by Barry Song, committed by Andrew Morton

mm: add nr argument in mem_cgroup_swapin_uncharge_swap() helper to support large folios

With large folio swap-in, we might need to uncharge multiple swap entries
at once, so add an nr argument to mem_cgroup_swapin_uncharge_swap().

For the existing two users, just pass nr=1.
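
As a rough illustration only (not part of this patch): a large-folio
swap-in path would be expected to pass the folio's page count instead of 1.
The helper below is hypothetical; mem_cgroup_swapin_uncharge_swap() and
folio_nr_pages() are the existing kernel APIs being exercised.

#include <linux/memcontrol.h>
#include <linux/mm.h>

/*
 * Hypothetical caller, for illustration only.  @entry is the first swap
 * entry covered by the (possibly large) folio; the folio has already been
 * charged and added to the swap cache.  All swap slots backing the folio
 * are uncharged in a single call.
 */
static void example_swapin_uncharge(struct folio *folio, swp_entry_t entry)
{
	mem_cgroup_swapin_uncharge_swap(entry, folio_nr_pages(folio));
}
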

Link: https://lkml.kernel.org/r/20240908232119.2157-3-21cnbao@gmail.com
Signed-off-by: Barry Song <v-songbaohua@oppo.com>
Acked-by: Chris Li <chrisl@kernel.org>
Reviewed-by: Yosry Ahmed <yosryahmed@google.com>
Cc: Shakeel Butt <shakeel.butt@linux.dev>
Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: David Hildenbrand <david@redhat.com>
Cc: Gao Xiang <xiang@kernel.org>
Cc: "Huang, Ying" <ying.huang@intel.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Kairui Song <kasong@tencent.com>
Cc: Kairui Song <ryncsn@gmail.com>
Cc: Kalesh Singh <kaleshsingh@google.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Nhat Pham <nphamcs@gmail.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Sergey Senozhatsky <senozhatsky@chromium.org>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Yang Shi <shy828301@gmail.com>
Cc: Chuanhua Han <hanchuanhua@oppo.com>
Cc: Kanchana P Sridhar <kanchana.p.sridhar@intel.com>
Cc: Usama Arif <usamaarif642@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 9d57090e
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -699,7 +699,8 @@ int mem_cgroup_hugetlb_try_charge(struct mem_cgroup *memcg, gfp_t gfp,
 int mem_cgroup_swapin_charge_folio(struct folio *folio, struct mm_struct *mm,
 				   gfp_t gfp, swp_entry_t entry);
-void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry);
+void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry, unsigned int nr_pages);
 
 void __mem_cgroup_uncharge(struct folio *folio);
@@ -1206,7 +1207,7 @@ static inline int mem_cgroup_swapin_charge_folio(struct folio *folio,
 	return 0;
 }
 
-static inline void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry)
+static inline void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry, unsigned int nr)
 {
 }
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -4559,14 +4559,15 @@ int mem_cgroup_swapin_charge_folio(struct folio *folio, struct mm_struct *mm,
 /*
  * mem_cgroup_swapin_uncharge_swap - uncharge swap slot
- * @entry: swap entry for which the page is charged
+ * @entry: the first swap entry for which the pages are charged
+ * @nr_pages: number of pages which will be uncharged
  *
  * Call this function after successfully adding the charged page to swapcache.
  *
  * Note: This function assumes the page for which swap slot is being uncharged
  * is order 0 page.
  */
-void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry)
+void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry, unsigned int nr_pages)
 {
 	/*
 	 * Cgroup1's unified memory+swap counter has been charged with the
@@ -4586,7 +4587,7 @@ void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry)
 		 * let's not wait for it. The page already received a
 		 * memory+swap charge, drop the swap entry duplicate.
 		 */
-		mem_cgroup_uncharge_swap(entry, 1);
+		mem_cgroup_uncharge_swap(entry, nr_pages);
 	}
 }
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4104,7 +4104,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
 				ret = VM_FAULT_OOM;
 				goto out_page;
 			}
-			mem_cgroup_swapin_uncharge_swap(entry);
+			mem_cgroup_swapin_uncharge_swap(entry, 1);
 
 			shadow = get_shadow_from_swap_cache(entry);
 			if (shadow)
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -522,7 +522,7 @@ struct folio *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 	if (add_to_swap_cache(new_folio, entry, gfp_mask & GFP_RECLAIM_MASK, &shadow))
 		goto fail_unlock;
 
-	mem_cgroup_swapin_uncharge_swap(entry);
+	mem_cgroup_swapin_uncharge_swap(entry, 1);
 
 	if (shadow)
 		workingset_refault(new_folio, shadow);