Commit b650e1d2 authored by Matthew Wilcox (Oracle), committed by Andrew Morton

mm/memory-failure: pass the folio to collect_procs_ksm()

We've already calculated it, so pass it in instead of recalculating it in
collect_procs_ksm().
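
For illustration, a minimal sketch of the before/after shape of this change
(simplified; the _old/_new names are hypothetical, not the kernel's):

	/* Before: the callee re-derives the folio the caller already has. */
	void collect_procs_ksm_old(struct page *page, struct list_head *to_kill,
				   int force_early)
	{
		struct folio *folio = page_folio(page); /* redundant recalculation */
		/* ... walk the KSM stable tree, adding each task that maps
		 * this folio to the to_kill list ... */
	}

	/* After: the caller hands down the folio it already computed. */
	void collect_procs_ksm_new(struct folio *folio, struct page *page,
				   struct list_head *to_kill, int force_early)
	{
		/* ... same walk, with no page_folio() call needed here ... */
	}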

Link: https://lkml.kernel.org/r/20240412193510.2356957-12-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Jane Chu <jane.chu@oracle.com>
Reviewed-by: Miaohe Lin <linmiaohe@huawei.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Oscar Salvador <osalvador@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 0edb5b28
include/linux/ksm.h
@@ -81,15 +81,9 @@ struct folio *ksm_might_need_to_copy(struct folio *folio,
 
 void rmap_walk_ksm(struct folio *folio, struct rmap_walk_control *rwc);
 void folio_migrate_ksm(struct folio *newfolio, struct folio *folio);
-
-#ifdef CONFIG_MEMORY_FAILURE
-void collect_procs_ksm(struct page *page, struct list_head *to_kill,
-		       int force_early);
-#endif
-
-#ifdef CONFIG_PROC_FS
+void collect_procs_ksm(struct folio *folio, struct page *page,
+		       struct list_head *to_kill, int force_early);
 long ksm_process_profit(struct mm_struct *);
-#endif /* CONFIG_PROC_FS */
 
 #else /* !CONFIG_KSM */
@@ -120,12 +114,10 @@ static inline void ksm_might_unmap_zero_page(struct mm_struct *mm, pte_t pte)
 {
 }
 
-#ifdef CONFIG_MEMORY_FAILURE
-static inline void collect_procs_ksm(struct page *page,
+static inline void collect_procs_ksm(struct folio *folio, struct page *page,
 				     struct list_head *to_kill, int force_early)
 {
 }
-#endif
 
 #ifdef CONFIG_MMU
 static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
mm/ksm.c
@@ -3178,12 +3178,11 @@ void rmap_walk_ksm(struct folio *folio, struct rmap_walk_control *rwc)
 /*
  * Collect processes when the error hit an ksm page.
  */
-void collect_procs_ksm(struct page *page, struct list_head *to_kill,
-		       int force_early)
+void collect_procs_ksm(struct folio *folio, struct page *page,
+		       struct list_head *to_kill, int force_early)
 {
 	struct ksm_stable_node *stable_node;
 	struct ksm_rmap_item *rmap_item;
-	struct folio *folio = page_folio(page);
 	struct vm_area_struct *vma;
 	struct task_struct *tsk;
mm/memory-failure.c
@@ -729,7 +729,7 @@ static void collect_procs(struct folio *folio, struct page *page,
 	if (!folio->mapping)
 		return;
 	if (unlikely(folio_test_ksm(folio)))
-		collect_procs_ksm(page, tokill, force_early);
+		collect_procs_ksm(folio, page, tokill, force_early);
 	else if (folio_test_anon(folio))
 		collect_procs_anon(folio, page, tokill, force_early);
 	else