Commit f9e54c3a authored by Alex Williamson, committed by Andrew Morton

vfio/pci: implement huge_fault support

With the addition of pfnmap support in vmf_insert_pfn_{pmd,pud}() we can
take advantage of PMD and PUD faults for PCI BAR mmaps and create more
efficient mappings.  PCI BARs are always a power of two in size and will
typically get at least PMD alignment without userspace even trying.
Userspace alignment for PUD mappings is also not too difficult, as the
sketch below shows.
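
To make the alignment point concrete, here is a minimal userspace sketch
of obtaining a PUD (1 GiB) aligned BAR mapping: reserve an oversized
anonymous region, then map the BAR at an aligned address inside it with
MAP_FIXED.  The device_fd, bar_offset, and bar_size names are assumptions
standing in for values obtained from the usual VFIO region-info ioctl;
this is illustrative and not part of the patch.

/*
 * Hypothetical userspace sketch: map a VFIO PCI BAR at a PUD (1 GiB)
 * aligned address so the kernel's huge_fault handler can install a
 * single PUD entry.  device_fd, bar_offset and bar_size are assumed
 * to come from VFIO_DEVICE_GET_REGION_INFO; they are not defined by
 * this patch.
 */
#include <stdint.h>
#include <sys/mman.h>
#include <sys/types.h>

#define PUD_SIZE        (1UL << 30)     /* 1 GiB on x86-64 */

static void *map_bar_pud_aligned(int device_fd, off_t bar_offset,
                                 size_t bar_size)
{
        /* Over-reserve so an aligned start address is guaranteed. */
        size_t span = bar_size + PUD_SIZE;
        uint8_t *raw = mmap(NULL, span, PROT_NONE,
                            MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        uintptr_t aligned;
        void *bar;

        if (raw == MAP_FAILED)
                return NULL;

        aligned = ((uintptr_t)raw + PUD_SIZE - 1) & ~(PUD_SIZE - 1);

        /*
         * Replace the aligned slice of the reservation with the BAR;
         * the leftover PROT_NONE slack can be munmap()ed afterwards.
         */
        bar = mmap((void *)aligned, bar_size, PROT_READ | PROT_WRITE,
                   MAP_SHARED | MAP_FIXED, device_fd, bar_offset);
        return bar == MAP_FAILED ? NULL : bar;
}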

Consolidate faults through a single handler with a new wrapper for
standard single-page faults.  The pre-faulting behavior of commit
d71a989c ("vfio/pci: Insert full vma on mmap'd MMIO fault") is removed
in this refactoring since huge_fault will cover the bulk of the faults and
results in more efficient page table usage.  We also want to avoid having
pre-faulted single-page mappings preempt huge page mappings: once a range
is populated with PTEs, a later huge fault over the same range falls back
to small pages, so the VMA would never be promoted.  (A simplified sketch
of the mm-side fallback dispatch follows the diff.)

Link: https://lkml.kernel.org/r/20240826204353.2228736-20-peterx@redhat.com
Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
Signed-off-by: Peter Xu <peterx@redhat.com>
Cc: Alexander Gordeev <agordeev@linux.ibm.com>
Cc: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Christian Borntraeger <borntraeger@linux.ibm.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Gavin Shan <gshan@redhat.com>
Cc: Gerald Schaefer <gerald.schaefer@linux.ibm.com>
Cc: Heiko Carstens <hca@linux.ibm.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jason Gunthorpe <jgg@nvidia.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Niklas Schnelle <schnelle@linux.ibm.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Sean Christopherson <seanjc@google.com>
Cc: Sven Schnelle <svens@linux.ibm.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: Will Deacon <will@kernel.org>
Cc: Zi Yan <ziy@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 3e509c9b
--- a/drivers/vfio/pci/vfio_pci_core.c
+++ b/drivers/vfio/pci/vfio_pci_core.c
@@ -20,6 +20,7 @@
 #include <linux/mutex.h>
 #include <linux/notifier.h>
 #include <linux/pci.h>
+#include <linux/pfn_t.h>
 #include <linux/pm_runtime.h>
 #include <linux/slab.h>
 #include <linux/types.h>
@@ -1657,14 +1658,20 @@ static unsigned long vma_to_pfn(struct vm_area_struct *vma)
         return (pci_resource_start(vdev->pdev, index) >> PAGE_SHIFT) + pgoff;
 }
 
-static vm_fault_t vfio_pci_mmap_fault(struct vm_fault *vmf)
+static vm_fault_t vfio_pci_mmap_huge_fault(struct vm_fault *vmf,
+                                           unsigned int order)
 {
         struct vm_area_struct *vma = vmf->vma;
         struct vfio_pci_core_device *vdev = vma->vm_private_data;
         unsigned long pfn, pgoff = vmf->pgoff - vma->vm_pgoff;
-        unsigned long addr = vma->vm_start;
         vm_fault_t ret = VM_FAULT_SIGBUS;
 
+        if (order && (vmf->address & ((PAGE_SIZE << order) - 1) ||
+                      vmf->address + (PAGE_SIZE << order) > vma->vm_end)) {
+                ret = VM_FAULT_FALLBACK;
+                goto out;
+        }
+
         pfn = vma_to_pfn(vma);
 
         down_read(&vdev->memory_lock);
@@ -1672,30 +1679,49 @@ static vm_fault_t vfio_pci_mmap_fault(struct vm_fault *vmf)
         if (vdev->pm_runtime_engaged || !__vfio_pci_memory_enabled(vdev))
                 goto out_unlock;
 
-        ret = vmf_insert_pfn(vma, vmf->address, pfn + pgoff);
-        if (ret & VM_FAULT_ERROR)
-                goto out_unlock;
-
-        /*
-         * Pre-fault the remainder of the vma, abort further insertions and
-         * supress error if fault is encountered during pre-fault.
-         */
-        for (; addr < vma->vm_end; addr += PAGE_SIZE, pfn++) {
-                if (addr == vmf->address)
-                        continue;
-
-                if (vmf_insert_pfn(vma, addr, pfn) & VM_FAULT_ERROR)
-                        break;
+        switch (order) {
+        case 0:
+                ret = vmf_insert_pfn(vma, vmf->address, pfn + pgoff);
+                break;
+#ifdef CONFIG_ARCH_SUPPORTS_PMD_PFNMAP
+        case PMD_ORDER:
+                ret = vmf_insert_pfn_pmd(vmf, __pfn_to_pfn_t(pfn + pgoff,
+                                                             PFN_DEV), false);
+                break;
+#endif
+#ifdef CONFIG_ARCH_SUPPORTS_PUD_PFNMAP
+        case PUD_ORDER:
+                ret = vmf_insert_pfn_pud(vmf, __pfn_to_pfn_t(pfn + pgoff,
+                                                             PFN_DEV), false);
+                break;
+#endif
+        default:
+                ret = VM_FAULT_FALLBACK;
         }
 
 out_unlock:
         up_read(&vdev->memory_lock);
+out:
+        dev_dbg_ratelimited(&vdev->pdev->dev,
+                            "%s(,order = %d) BAR %ld page offset 0x%lx: 0x%x\n",
+                            __func__, order,
+                            vma->vm_pgoff >>
+                                (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT),
+                            pgoff, (unsigned int)ret);
 
         return ret;
 }
 
+static vm_fault_t vfio_pci_mmap_page_fault(struct vm_fault *vmf)
+{
+        return vfio_pci_mmap_huge_fault(vmf, 0);
+}
+
 static const struct vm_operations_struct vfio_pci_mmap_ops = {
-        .fault = vfio_pci_mmap_fault,
+        .fault = vfio_pci_mmap_page_fault,
+#ifdef CONFIG_ARCH_SUPPORTS_HUGE_PFNMAP
+        .huge_fault = vfio_pci_mmap_huge_fault,
+#endif
 };
 
 int vfio_pci_core_mmap(struct vfio_device *core_vdev, struct vm_area_struct *vma)
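
For context on why returning VM_FAULT_FALLBACK is always safe here: the
core fault path tries the largest mapping first and demotes on fallback.
Below is a simplified, hypothetical sketch of that mm-side dispatch; the
vma_supports_*() helpers are invented for illustration, and the real
logic lives in mm/memory.c.

/*
 * Simplified, hypothetical sketch of the mm-side dispatch that makes
 * VM_FAULT_FALLBACK safe to return: each level degrades to a smaller
 * mapping size.  Not the actual mm/memory.c code.
 */
static vm_fault_t dispatch_fault(struct vm_fault *vmf)
{
        const struct vm_operations_struct *ops = vmf->vma->vm_ops;
        vm_fault_t ret;

        if (ops->huge_fault && vma_supports_pud(vmf->vma)) {
                ret = ops->huge_fault(vmf, PUD_ORDER);  /* 1 GiB attempt */
                if (!(ret & VM_FAULT_FALLBACK))
                        return ret;
        }
        if (ops->huge_fault && vma_supports_pmd(vmf->vma)) {
                ret = ops->huge_fault(vmf, PMD_ORDER);  /* 2 MiB attempt */
                if (!(ret & VM_FAULT_FALLBACK))
                        return ret;
        }
        /* Final level: the ordinary 4K .fault path. */
        return ops->fault(vmf);
}

Because each level degrades gracefully, vfio_pci_mmap_huge_fault() can
reject misaligned or out-of-bounds huge faults without ever failing the
access outright.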