Commit c93012d8 authored by Matthew Wilcox (Oracle), committed by Andrew Morton

dax: use huge_zero_folio

Convert from huge_zero_page to huge_zero_folio.
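
The shape of the conversion, reduced to a minimal sketch (identifiers are the
ones used in the hunks below; locking and error handling elided):

	/* Before: the huge zero page was handled as a bare struct page. */
	struct page *zero_page = mm_get_huge_zero_page(vmf->vma->vm_mm);
	pfn_t pfn = page_to_pfn_t(zero_page);

	/* After: the caller holds a struct folio.  Helpers that still take
	 * a struct page *, such as mk_pmd() and page_to_pfn_t(), are passed
	 * the folio's head page via &zero_folio->page. */
	struct folio *zero_folio = mm_get_huge_zero_folio(vmf->vma->vm_mm);
	pfn_t pfn = page_to_pfn_t(&zero_folio->page);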

Link: https://lkml.kernel.org/r/20240326202833.523759-8-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent e28833bc
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -1207,17 +1207,17 @@ static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
 	struct vm_area_struct *vma = vmf->vma;
 	struct inode *inode = mapping->host;
 	pgtable_t pgtable = NULL;
-	struct page *zero_page;
+	struct folio *zero_folio;
 	spinlock_t *ptl;
 	pmd_t pmd_entry;
 	pfn_t pfn;
 
-	zero_page = mm_get_huge_zero_page(vmf->vma->vm_mm);
+	zero_folio = mm_get_huge_zero_folio(vmf->vma->vm_mm);
 
-	if (unlikely(!zero_page))
+	if (unlikely(!zero_folio))
 		goto fallback;
 
-	pfn = page_to_pfn_t(zero_page);
+	pfn = page_to_pfn_t(&zero_folio->page);
 	*entry = dax_insert_entry(xas, vmf, iter, *entry, pfn,
 				  DAX_PMD | DAX_ZERO_PAGE);
@@ -1237,17 +1237,17 @@ static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
 		pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
 		mm_inc_nr_ptes(vma->vm_mm);
 	}
-	pmd_entry = mk_pmd(zero_page, vmf->vma->vm_page_prot);
+	pmd_entry = mk_pmd(&zero_folio->page, vmf->vma->vm_page_prot);
 	pmd_entry = pmd_mkhuge(pmd_entry);
 	set_pmd_at(vmf->vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry);
 	spin_unlock(ptl);
-	trace_dax_pmd_load_hole(inode, vmf, zero_page, *entry);
+	trace_dax_pmd_load_hole(inode, vmf, zero_folio, *entry);
 	return VM_FAULT_NOPAGE;
 
 fallback:
 	if (pgtable)
 		pte_free(vma->vm_mm, pgtable);
-	trace_dax_pmd_load_hole_fallback(inode, vmf, zero_page, *entry);
+	trace_dax_pmd_load_hole_fallback(inode, vmf, zero_folio, *entry);
 	return VM_FAULT_FALLBACK;
 }
 #else
--- a/include/trace/events/fs_dax.h
+++ b/include/trace/events/fs_dax.h
@@ -62,14 +62,14 @@ DEFINE_PMD_FAULT_EVENT(dax_pmd_fault_done);
 
 DECLARE_EVENT_CLASS(dax_pmd_load_hole_class,
 	TP_PROTO(struct inode *inode, struct vm_fault *vmf,
-		struct page *zero_page,
+		struct folio *zero_folio,
 		void *radix_entry),
-	TP_ARGS(inode, vmf, zero_page, radix_entry),
+	TP_ARGS(inode, vmf, zero_folio, radix_entry),
 	TP_STRUCT__entry(
 		__field(unsigned long, ino)
 		__field(unsigned long, vm_flags)
 		__field(unsigned long, address)
-		__field(struct page *, zero_page)
+		__field(struct folio *, zero_folio)
 		__field(void *, radix_entry)
 		__field(dev_t, dev)
 	),
@@ -78,17 +78,17 @@ DECLARE_EVENT_CLASS(dax_pmd_load_hole_class,
 		__entry->ino = inode->i_ino;
 		__entry->vm_flags = vmf->vma->vm_flags;
 		__entry->address = vmf->address;
-		__entry->zero_page = zero_page;
+		__entry->zero_folio = zero_folio;
 		__entry->radix_entry = radix_entry;
 	),
-	TP_printk("dev %d:%d ino %#lx %s address %#lx zero_page %p "
+	TP_printk("dev %d:%d ino %#lx %s address %#lx zero_folio %p "
 		"radix_entry %#lx",
 		MAJOR(__entry->dev),
 		MINOR(__entry->dev),
 		__entry->ino,
 		__entry->vm_flags & VM_SHARED ? "shared" : "private",
 		__entry->address,
-		__entry->zero_page,
+		__entry->zero_folio,
 		(unsigned long)__entry->radix_entry
 	)
 )
@@ -96,8 +96,8 @@ DECLARE_EVENT_CLASS(dax_pmd_load_hole_class,
 #define DEFINE_PMD_LOAD_HOLE_EVENT(name) \
 DEFINE_EVENT(dax_pmd_load_hole_class, name, \
 	TP_PROTO(struct inode *inode, struct vm_fault *vmf, \
-		struct page *zero_page, void *radix_entry), \
-	TP_ARGS(inode, vmf, zero_page, radix_entry))
+		struct folio *zero_folio, void *radix_entry), \
+	TP_ARGS(inode, vmf, zero_folio, radix_entry))
 
 DEFINE_PMD_LOAD_HOLE_EVENT(dax_pmd_load_hole);
 DEFINE_PMD_LOAD_HOLE_EVENT(dax_pmd_load_hole_fallback);
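
A note on the &zero_folio->page conversions above: mk_pmd() and
page_to_pfn_t() still operate on a struct page * at this point in the
series, so the folio is lowered to its head page at those call
boundaries, e.g.:

	/* Sketch, not part of the commit: zero_folio is assumed to come
	 * from mm_get_huge_zero_folio() as in dax_pmd_load_hole() above. */
	pmd_t pmd_entry = mk_pmd(&zero_folio->page, vmf->vma->vm_page_prot);
	pmd_entry = pmd_mkhuge(pmd_entry);

Since the huge zero folio wraps the same allocation the old
mm_get_huge_zero_page() returned, the pointer passed down is unchanged.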