Commit a7d1f22b authored by Christoph Hellwig, committed by Jason Gunthorpe

mm: turn migrate_vma upside down

There isn't any good reason to pass callbacks to migrate_vma.  Instead
we can just export the three steps done by this function to drivers and
let them sequence the operation without callbacks.  This removes a lot
of boilerplate code as-is, and will allow the drivers to drastically
improve code flow and error handling further on.

Link: https://lore.kernel.org/r/20190814075928.23766-2-hch@lst.de
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Ralph Campbell <rcampbell@nvidia.com>
Tested-by: Ralph Campbell <rcampbell@nvidia.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent f4fb3b9c
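
The new calling convention is easiest to see end to end. The following is an editorial sketch, not part of the patch: my_alloc_and_copy() and my_update_device_mappings() are hypothetical driver hooks standing in for the device-specific copy and mapping work that the old alloc_and_copy() and finalize_and_map() callbacks used to do::

    #include <linux/migrate.h>
    #include <linux/mm.h>

    /* Hypothetical driver hooks; not part of the kernel API. */
    void my_alloc_and_copy(struct migrate_vma *args);
    void my_update_device_mappings(struct migrate_vma *args);

    static int my_migrate_range(struct vm_area_struct *vma,
                                unsigned long start, unsigned long end,
                                unsigned long *src, unsigned long *dst)
    {
        struct migrate_vma args = {
            .vma   = vma,
            .start = start,
            .end   = end,
            .src   = src,
            .dst   = dst,
        };
        int ret;

        /* Step 1: collect, lock and unmap the source pages. */
        ret = migrate_vma_setup(&args);
        if (ret < 0)
            return ret;

        /* Nothing could be collected for migration in this range. */
        if (!args.cpages)
            return 0;

        /* Driver work: allocate destination memory and copy the data. */
        my_alloc_and_copy(&args);

        /* Step 2: migrate struct page state to the destination pages. */
        migrate_vma_pages(&args);

        /* Driver work: update device page tables, wait for the copy. */
        my_update_device_mappings(&args);

        /* Step 3: restore the CPU page tables and unlock the pages. */
        migrate_vma_finalize(&args);
        return 0;
    }
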
@@ -339,58 +339,8 @@ Migration to and from device memory
===================================
Because the CPU cannot access device memory, migration must use the device DMA
engine to perform copy from and to device memory. For this we need a new
migration helper::
int migrate_vma(const struct migrate_vma_ops *ops,
struct vm_area_struct *vma,
unsigned long mentries,
unsigned long start,
unsigned long end,
unsigned long *src,
unsigned long *dst,
void *private);
Unlike other migration functions it works on a range of virtual address, there
are two reasons for that. First, device DMA copy has a high setup overhead cost
and thus batching multiple pages is needed as otherwise the migration overhead
makes the whole exercise pointless. The second reason is because the
migration might be for a range of addresses the device is actively accessing.
The migrate_vma_ops struct defines two callbacks. First one (alloc_and_copy())
controls destination memory allocation and copy operation. Second one is there
to allow the device driver to perform cleanup operations after migration::
struct migrate_vma_ops {
void (*alloc_and_copy)(struct vm_area_struct *vma,
const unsigned long *src,
unsigned long *dst,
unsigned long start,
unsigned long end,
void *private);
void (*finalize_and_map)(struct vm_area_struct *vma,
const unsigned long *src,
const unsigned long *dst,
unsigned long start,
unsigned long end,
void *private);
};
It is important to stress that these migration helpers allow for holes in the
virtual address range. Some pages in the range might not be migrated for all
the usual reasons (page is pinned, page is locked, ...). This helper does not
fail but just skips over those pages.
The alloc_and_copy() might decide to not migrate all pages in the
range (for reasons under the callback control). For those, the callback just
has to leave the corresponding dst entry empty.
Finally, the migration of the struct page might fail (for file backed page) for
various reasons (failure to freeze reference, or update page cache, ...). If
that happens, then the finalize_and_map() can catch any pages that were not
migrated. Note those pages were still copied to a new page and thus we wasted
bandwidth but this is considered as a rare event and a price that we are
willing to pay to keep all the code simpler.
engine to perform copy from and to device memory. For this we need to use
migrate_vma_setup(), migrate_vma_pages(), and migrate_vma_finalize() helpers.
Memory cgroup (memcg) and rss accounting
......
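
Not part of the patch: for reference, a hedged sketch of the driver-side work that replaces the old alloc_and_copy() callback, i.e. the my_alloc_and_copy() hook used in the sketch above. It assumes a system-memory destination and elides the actual DMA copy; the dst-entry convention (migrate_pfn() of the new page plus MIGRATE_PFN_LOCKED, an empty entry meaning "skip this page") is the one described in the text removed above and is not changed by this patch::

    #include <linux/migrate.h>
    #include <linux/mm.h>
    #include <linux/gfp.h>
    #include <linux/pagemap.h>

    /* Sketch only: system-memory destination, copy step elided. */
    void my_alloc_and_copy(struct migrate_vma *args)
    {
        unsigned long npages = (args->end - args->start) >> PAGE_SHIFT;
        unsigned long addr, i;

        for (i = 0, addr = args->start; i < npages; i++, addr += PAGE_SIZE) {
            struct page *dpage;

            /* Pages the core could not collect are simply skipped. */
            if (!(args->src[i] & MIGRATE_PFN_MIGRATE))
                continue;

            dpage = alloc_page_vma(GFP_HIGHUSER, args->vma, addr);
            if (!dpage)
                continue;    /* empty dst entry == do not migrate */
            lock_page(dpage);

            /* ... set up or batch the DMA copy from the source page ... */

            args->dst[i] = migrate_pfn(page_to_pfn(dpage)) |
                           MIGRATE_PFN_LOCKED;
        }
    }
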
@@ -131,9 +131,8 @@ nouveau_dmem_fault_alloc_and_copy(struct vm_area_struct *vma,
unsigned long *dst_pfns,
unsigned long start,
unsigned long end,
void *private)
struct nouveau_dmem_fault *fault)
{
struct nouveau_dmem_fault *fault = private;
struct nouveau_drm *drm = fault->drm;
struct device *dev = drm->dev->dev;
unsigned long addr, i, npages = 0;
@@ -230,14 +229,9 @@ nouveau_dmem_fault_alloc_and_copy(struct vm_area_struct *vma,
}
}
void nouveau_dmem_fault_finalize_and_map(struct vm_area_struct *vma,
const unsigned long *src_pfns,
const unsigned long *dst_pfns,
unsigned long start,
unsigned long end,
void *private)
static void
nouveau_dmem_fault_finalize_and_map(struct nouveau_dmem_fault *fault)
{
struct nouveau_dmem_fault *fault = private;
struct nouveau_drm *drm = fault->drm;
if (fault->fence) {
@@ -257,29 +251,35 @@ void nouveau_dmem_fault_finalize_and_map(struct vm_area_struct *vma,
kfree(fault->dma);
}
static const struct migrate_vma_ops nouveau_dmem_fault_migrate_ops = {
.alloc_and_copy = nouveau_dmem_fault_alloc_and_copy,
.finalize_and_map = nouveau_dmem_fault_finalize_and_map,
};
static vm_fault_t nouveau_dmem_migrate_to_ram(struct vm_fault *vmf)
{
struct nouveau_dmem *dmem = page_to_dmem(vmf->page);
unsigned long src[1] = {0}, dst[1] = {0};
struct migrate_vma args = {
.vma = vmf->vma,
.start = vmf->address,
.end = vmf->address + PAGE_SIZE,
.src = src,
.dst = dst,
};
struct nouveau_dmem_fault fault = { .drm = dmem->drm };
int ret;
/*
* FIXME what we really want is to find some heuristic to migrate more
* than just one page on CPU fault. When such fault happens it is very
* likely that more surrounding page will CPU fault too.
*/
ret = migrate_vma(&nouveau_dmem_fault_migrate_ops, vmf->vma,
vmf->address, vmf->address + PAGE_SIZE,
src, dst, &fault);
if (ret)
if (migrate_vma_setup(&args) < 0)
return VM_FAULT_SIGBUS;
if (!args.cpages)
return 0;
nouveau_dmem_fault_alloc_and_copy(args.vma, src, dst, args.start,
args.end, &fault);
migrate_vma_pages(&args);
nouveau_dmem_fault_finalize_and_map(&fault);
migrate_vma_finalize(&args);
if (dst[0] == MIGRATE_PFN_ERROR)
return VM_FAULT_SIGBUS;
@@ -648,9 +648,8 @@ nouveau_dmem_migrate_alloc_and_copy(struct vm_area_struct *vma,
unsigned long *dst_pfns,
unsigned long start,
unsigned long end,
void *private)
struct nouveau_migrate *migrate)
{
struct nouveau_migrate *migrate = private;
struct nouveau_drm *drm = migrate->drm;
struct device *dev = drm->dev->dev;
unsigned long addr, i, npages = 0;
@@ -747,14 +746,9 @@ nouveau_dmem_migrate_alloc_and_copy(struct vm_area_struct *vma,
}
}
void nouveau_dmem_migrate_finalize_and_map(struct vm_area_struct *vma,
const unsigned long *src_pfns,
const unsigned long *dst_pfns,
unsigned long start,
unsigned long end,
void *private)
static void
nouveau_dmem_migrate_finalize_and_map(struct nouveau_migrate *migrate)
{
struct nouveau_migrate *migrate = private;
struct nouveau_drm *drm = migrate->drm;
if (migrate->fence) {
@@ -779,10 +773,15 @@ void nouveau_dmem_migrate_finalize_and_map(struct vm_area_struct *vma,
*/
}
static const struct migrate_vma_ops nouveau_dmem_migrate_ops = {
.alloc_and_copy = nouveau_dmem_migrate_alloc_and_copy,
.finalize_and_map = nouveau_dmem_migrate_finalize_and_map,
};
static void nouveau_dmem_migrate_chunk(struct migrate_vma *args,
struct nouveau_migrate *migrate)
{
nouveau_dmem_migrate_alloc_and_copy(args->vma, args->src, args->dst,
args->start, args->end, migrate);
migrate_vma_pages(args);
nouveau_dmem_migrate_finalize_and_map(migrate);
migrate_vma_finalize(args);
}
int
nouveau_dmem_migrate_vma(struct nouveau_drm *drm,
@@ -790,40 +789,45 @@ nouveau_dmem_migrate_vma(struct nouveau_drm *drm,
unsigned long start,
unsigned long end)
{
unsigned long *src_pfns, *dst_pfns, npages;
struct nouveau_migrate migrate = {0};
unsigned long i, c, max;
int ret = 0;
npages = (end - start) >> PAGE_SHIFT;
max = min(SG_MAX_SINGLE_ALLOC, npages);
src_pfns = kzalloc(sizeof(long) * max, GFP_KERNEL);
if (src_pfns == NULL)
return -ENOMEM;
dst_pfns = kzalloc(sizeof(long) * max, GFP_KERNEL);
if (dst_pfns == NULL) {
kfree(src_pfns);
return -ENOMEM;
}
unsigned long npages = (end - start) >> PAGE_SHIFT;
unsigned long max = min(SG_MAX_SINGLE_ALLOC, npages);
struct migrate_vma args = {
.vma = vma,
.start = start,
};
struct nouveau_migrate migrate = {
.drm = drm,
.vma = vma,
.npages = npages,
};
unsigned long c, i;
int ret = -ENOMEM;
args.src = kzalloc(sizeof(long) * max, GFP_KERNEL);
if (!args.src)
goto out;
args.dst = kzalloc(sizeof(long) * max, GFP_KERNEL);
if (!args.dst)
goto out_free_src;
migrate.drm = drm;
migrate.vma = vma;
migrate.npages = npages;
for (i = 0; i < npages; i += c) {
unsigned long next;
c = min(SG_MAX_SINGLE_ALLOC, npages);
next = start + (c << PAGE_SHIFT);
ret = migrate_vma(&nouveau_dmem_migrate_ops, vma, start,
next, src_pfns, dst_pfns, &migrate);
args.end = start + (c << PAGE_SHIFT);
ret = migrate_vma_setup(&args);
if (ret)
goto out;
start = next;
goto out_free_dst;
if (args.cpages)
nouveau_dmem_migrate_chunk(&args, &migrate);
args.start = args.end;
}
ret = 0;
out_free_dst:
kfree(args.dst);
out_free_src:
kfree(args.src);
out:
kfree(dst_pfns);
kfree(src_pfns);
return ret;
}
......
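
The nouveau_dmem_migrate_vma() rewrite above also shows the intended pattern for large ranges: one struct migrate_vma is reused across SG_MAX_SINGLE_ALLOC-sized chunks, with only start/end advanced between iterations. Condensed into an editorial sketch (do_one_chunk() is a hypothetical stand-in for the driver's copy, migrate_vma_pages() and migrate_vma_finalize() sequence)::

    #include <linux/migrate.h>
    #include <linux/mm.h>
    #include <linux/scatterlist.h>  /* SG_MAX_SINGLE_ALLOC */
    #include <linux/slab.h>

    /* Hypothetical per-chunk helper: copy, migrate_vma_pages(),
     * migrate_vma_finalize(). */
    void do_one_chunk(struct migrate_vma *args);

    static int migrate_range_in_chunks(struct vm_area_struct *vma,
                                       unsigned long start, unsigned long end)
    {
        unsigned long npages = (end - start) >> PAGE_SHIFT;
        unsigned long max = min(SG_MAX_SINGLE_ALLOC, npages);
        struct migrate_vma args = {
            .vma   = vma,
            .start = start,
        };
        unsigned long i, c;
        int ret = -ENOMEM;

        /* src and dst must hold one entry per page of the largest chunk. */
        args.src = kcalloc(max, sizeof(*args.src), GFP_KERNEL);
        args.dst = kcalloc(max, sizeof(*args.dst), GFP_KERNEL);
        if (!args.src || !args.dst)
            goto out;

        for (i = 0; i < npages; i += c) {
            c = min(max, npages - i);
            args.end = args.start + (c << PAGE_SHIFT);

            ret = migrate_vma_setup(&args);
            if (ret)
                goto out;
            if (args.cpages)
                do_one_chunk(&args);
            args.start = args.end;
        }
        ret = 0;
    out:
        kfree(args.dst);
        kfree(args.src);
        return ret;
    }
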
@@ -182,107 +182,27 @@ static inline unsigned long migrate_pfn(unsigned long pfn)
return (pfn << MIGRATE_PFN_SHIFT) | MIGRATE_PFN_VALID;
}
/*
* struct migrate_vma_ops - migrate operation callback
*
* @alloc_and_copy: alloc destination memory and copy source memory to it
* @finalize_and_map: allow caller to map the successfully migrated pages
*
*
* The alloc_and_copy() callback happens once all source pages have been locked,
* unmapped and checked (checked whether pinned or not). All pages that can be
* migrated will have an entry in the src array set with the pfn value of the
* page and with the MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE flag set (other
* flags might be set but should be ignored by the callback).
*
* The alloc_and_copy() callback can then allocate destination memory and copy
* source memory to it for all those entries (ie with MIGRATE_PFN_VALID and
* MIGRATE_PFN_MIGRATE flag set). Once these are allocated and copied, the
* callback must update each corresponding entry in the dst array with the pfn
* value of the destination page and with the MIGRATE_PFN_VALID and
* MIGRATE_PFN_LOCKED flags set (destination pages must have their struct pages
* locked, via lock_page()).
*
* At this point the alloc_and_copy() callback is done and returns.
*
* Note that the callback does not have to migrate all the pages that are
* marked with MIGRATE_PFN_MIGRATE flag in src array unless this is a migration
* from device memory to system memory (ie the MIGRATE_PFN_DEVICE flag is also
* set in the src array entry). If the device driver cannot migrate a device
* page back to system memory, then it must set the corresponding dst array
* entry to MIGRATE_PFN_ERROR. This will trigger a SIGBUS if CPU tries to
* access any of the virtual addresses originally backed by this page. Because
* a SIGBUS is such a severe result for the userspace process, the device
* driver should avoid setting MIGRATE_PFN_ERROR unless it is really in an
* unrecoverable state.
*
* For empty entry inside CPU page table (pte_none() or pmd_none() is true) we
* do set MIGRATE_PFN_MIGRATE flag inside the corresponding source array thus
* allowing device driver to allocate device memory for those unback virtual
* address. For this the device driver simply have to allocate device memory
* and properly set the destination entry like for regular migration. Note that
* this can still fails and thus inside the device driver must check if the
* migration was successful for those entry inside the finalize_and_map()
* callback just like for regular migration.
*
* THE alloc_and_copy() CALLBACK MUST NOT CHANGE ANY OF THE SRC ARRAY ENTRIES
* OR BAD THINGS WILL HAPPEN !
struct migrate_vma {
struct vm_area_struct *vma;
/*
* Both src and dst array must be big enough for
* (end - start) >> PAGE_SHIFT entries.
*
*
* The finalize_and_map() callback happens after struct page migration from
* source to destination (destination struct pages are the struct pages for the
* memory allocated by the alloc_and_copy() callback). Migration can fail, and
* thus the finalize_and_map() allows the driver to inspect which pages were
* successfully migrated, and which were not. Successfully migrated pages will
* have the MIGRATE_PFN_MIGRATE flag set for their src array entry.
*
* It is safe to update device page table from within the finalize_and_map()
* callback because both destination and source page are still locked, and the
* mmap_sem is held in read mode (hence no one can unmap the range being
* migrated).
*
* Once callback is done cleaning up things and updating its page table (if it
* chose to do so, this is not an obligation) then it returns. At this point,
* the HMM core will finish up the final steps, and the migration is complete.
*
* THE finalize_and_map() CALLBACK MUST NOT CHANGE ANY OF THE SRC OR DST ARRAY
* ENTRIES OR BAD THINGS WILL HAPPEN !
* The src array must not be modified by the caller after
* migrate_vma_setup(), and must not change the dst array after
* migrate_vma_pages() returns.
*/
struct migrate_vma_ops {
void (*alloc_and_copy)(struct vm_area_struct *vma,
const unsigned long *src,
unsigned long *dst,
unsigned long start,
unsigned long end,
void *private);
void (*finalize_and_map)(struct vm_area_struct *vma,
const unsigned long *src,
const unsigned long *dst,
unsigned long start,
unsigned long end,
void *private);
unsigned long *dst;
unsigned long *src;
unsigned long cpages;
unsigned long npages;
unsigned long start;
unsigned long end;
};
#if defined(CONFIG_MIGRATE_VMA_HELPER)
int migrate_vma(const struct migrate_vma_ops *ops,
struct vm_area_struct *vma,
unsigned long start,
unsigned long end,
unsigned long *src,
unsigned long *dst,
void *private);
#else
static inline int migrate_vma(const struct migrate_vma_ops *ops,
struct vm_area_struct *vma,
unsigned long start,
unsigned long end,
unsigned long *src,
unsigned long *dst,
void *private)
{
return -EINVAL;
}
#endif /* IS_ENABLED(CONFIG_MIGRATE_VMA_HELPER) */
int migrate_vma_setup(struct migrate_vma *args);
void migrate_vma_pages(struct migrate_vma *migrate);
void migrate_vma_finalize(struct migrate_vma *migrate);
#endif /* CONFIG_MIGRATION */
......
This diff is collapsed.