Commit b3092b3b authored by Bob Liu's avatar Bob Liu Committed by Linus Torvalds

thp: cleanup: introduce mk_huge_pmd()

Introduce mk_huge_pmd() to simplify the code
Signed-off-by: Bob Liu <lliubbo@gmail.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Minchan Kim <minchan.kim@gmail.com>
Cc: Ni zhan Chen <nizhan.chen@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent fa475e51
...@@ -606,6 +606,15 @@ static inline pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma) ...@@ -606,6 +606,15 @@ static inline pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
return pmd; return pmd;
} }
static inline pmd_t mk_huge_pmd(struct page *page, struct vm_area_struct *vma)
{
	/*
	 * Build a huge PMD entry mapping @page with the VMA's page
	 * protection: start from a normal PMD, mark it dirty, let
	 * maybe_pmd_mkwrite() make it writable if the VMA allows, and
	 * finally promote it to a huge entry.
	 */
	pmd_t hpmd = mk_pmd(page, vma->vm_page_prot);

	hpmd = pmd_mkdirty(hpmd);
	hpmd = maybe_pmd_mkwrite(hpmd, vma);
	return pmd_mkhuge(hpmd);
}
static int __do_huge_pmd_anonymous_page(struct mm_struct *mm, static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
struct vm_area_struct *vma, struct vm_area_struct *vma,
unsigned long haddr, pmd_t *pmd, unsigned long haddr, pmd_t *pmd,
...@@ -629,9 +638,7 @@ static int __do_huge_pmd_anonymous_page(struct mm_struct *mm, ...@@ -629,9 +638,7 @@ static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
pte_free(mm, pgtable); pte_free(mm, pgtable);
} else { } else {
pmd_t entry; pmd_t entry;
entry = mk_pmd(page, vma->vm_page_prot); entry = mk_huge_pmd(page, vma);
entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
entry = pmd_mkhuge(entry);
/* /*
* The spinlocking to take the lru_lock inside * The spinlocking to take the lru_lock inside
* page_add_new_anon_rmap() acts as a full memory * page_add_new_anon_rmap() acts as a full memory
...@@ -951,9 +958,7 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma, ...@@ -951,9 +958,7 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
} else { } else {
pmd_t entry; pmd_t entry;
VM_BUG_ON(!PageHead(page)); VM_BUG_ON(!PageHead(page));
entry = mk_pmd(new_page, vma->vm_page_prot); entry = mk_huge_pmd(new_page, vma);
entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
entry = pmd_mkhuge(entry);
pmdp_clear_flush(vma, haddr, pmd); pmdp_clear_flush(vma, haddr, pmd);
page_add_new_anon_rmap(new_page, vma, haddr); page_add_new_anon_rmap(new_page, vma, haddr);
set_pmd_at(mm, haddr, pmd, entry); set_pmd_at(mm, haddr, pmd, entry);
...@@ -2000,9 +2005,7 @@ static void collapse_huge_page(struct mm_struct *mm, ...@@ -2000,9 +2005,7 @@ static void collapse_huge_page(struct mm_struct *mm,
__SetPageUptodate(new_page); __SetPageUptodate(new_page);
pgtable = pmd_pgtable(_pmd); pgtable = pmd_pgtable(_pmd);
_pmd = mk_pmd(new_page, vma->vm_page_prot); _pmd = mk_huge_pmd(new_page, vma);
_pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);
_pmd = pmd_mkhuge(_pmd);
/* /*
* spin_lock() below is not the equivalent of smp_wmb(), so * spin_lock() below is not the equivalent of smp_wmb(), so
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment