Commit 5504ed29 authored by Jérôme Glisse, committed by Linus Torvalds

mm/hmm: do not differentiate between empty entry or missing directory

There is no point in differentiating between a range for which there is
not even a page directory (and thus no entries) and a range whose entries
are empty (pte_none() or pmd_none() returns true).

Simply drop the distinction, i.e., remove the HMM_PFN_EMPTY flag and merge
the now-duplicate hmm_vma_walk_hole() and hmm_vma_walk_clear() functions.

Link: http://lkml.kernel.org/r/20180323005527.758-11-jglisse@redhat.com
Signed-off-by: Jérôme Glisse <jglisse@redhat.com>
Reviewed-by: John Hubbard <jhubbard@nvidia.com>
Cc: Evgeny Baskakov <ebaskakov@nvidia.com>
Cc: Ralph Campbell <rcampbell@nvidia.com>
Cc: Mark Hairgrove <mhairgrove@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 855ce7d2
--- a/include/linux/hmm.h
+++ b/include/linux/hmm.h
@@ -84,7 +84,6 @@ struct hmm;
  * HMM_PFN_VALID: pfn is valid. It has, at least, read permission.
  * HMM_PFN_WRITE: CPU page table has write permission set
  * HMM_PFN_ERROR: corresponding CPU page table entry points to poisoned memory
- * HMM_PFN_EMPTY: corresponding CPU page table entry is pte_none()
  * HMM_PFN_SPECIAL: corresponding CPU page table entry is special; i.e., the
  * result of vm_insert_pfn() or vm_insert_page(). Therefore, it should not
  * be mirrored by a device, because the entry will never have HMM_PFN_VALID
@@ -94,10 +93,9 @@ struct hmm;
 #define HMM_PFN_VALID (1 << 0)
 #define HMM_PFN_WRITE (1 << 1)
 #define HMM_PFN_ERROR (1 << 2)
-#define HMM_PFN_EMPTY (1 << 3)
-#define HMM_PFN_SPECIAL (1 << 4)
-#define HMM_PFN_DEVICE_UNADDRESSABLE (1 << 5)
-#define HMM_PFN_SHIFT 6
+#define HMM_PFN_SPECIAL (1 << 3)
+#define HMM_PFN_DEVICE_UNADDRESSABLE (1 << 4)
+#define HMM_PFN_SHIFT 5
 
 /*
  * hmm_pfn_to_page() - return struct page pointed to by a valid HMM pfn
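With HMM_PFN_EMPTY gone, the remaining flags renumber down one bit and
HMM_PFN_SHIFT shrinks from 6 to 5; the actual page frame number still lives
in the bits above HMM_PFN_SHIFT. A minimal sketch of how such a value
decodes, modeled on the hmm_pfn_to_page() helper named in the context above
(a reconstruction, not quoted verbatim from the tree):

	/* Flags occupy the low HMM_PFN_SHIFT bits; the pfn sits above them. */
	static inline struct page *hmm_pfn_to_page(uint64_t pfn)
	{
		if (!(pfn & HMM_PFN_VALID))
			return NULL;	/* empty, error or special entry: no page */
		return pfn_to_page(pfn >> HMM_PFN_SHIFT);
	}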
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -348,6 +348,16 @@ static void hmm_pfns_clear(uint64_t *pfns,
 		*pfns = 0;
 }
 
+/*
+ * hmm_vma_walk_hole() - handle a range lacking valid pmd or pte(s)
+ * @start: range virtual start address (inclusive)
+ * @end: range virtual end address (exclusive)
+ * @walk: mm_walk structure
+ * Returns: 0 on success, -EAGAIN after page fault, or page fault error
+ *
+ * This function will be called whenever pmd_none() or pte_none() returns true,
+ * or whenever there is no page directory covering the virtual address range.
+ */
 static int hmm_vma_walk_hole(unsigned long addr,
 			     unsigned long end,
 			     struct mm_walk *walk)
@@ -357,31 +367,6 @@ static int hmm_vma_walk_hole(unsigned long addr,
 	uint64_t *pfns = range->pfns;
 	unsigned long i;
 
-	hmm_vma_walk->last = addr;
-	i = (addr - range->start) >> PAGE_SHIFT;
-	for (; addr < end; addr += PAGE_SIZE, i++) {
-		pfns[i] = HMM_PFN_EMPTY;
-		if (hmm_vma_walk->fault) {
-			int ret;
-
-			ret = hmm_vma_do_fault(walk, addr, &pfns[i]);
-			if (ret != -EAGAIN)
-				return ret;
-		}
-	}
-
-	return hmm_vma_walk->fault ? -EAGAIN : 0;
-}
-
-static int hmm_vma_walk_clear(unsigned long addr,
-			      unsigned long end,
-			      struct mm_walk *walk)
-{
-	struct hmm_vma_walk *hmm_vma_walk = walk->private;
-	struct hmm_range *range = hmm_vma_walk->range;
-	uint64_t *pfns = range->pfns;
-	unsigned long i;
-
 	hmm_vma_walk->last = addr;
 	i = (addr - range->start) >> PAGE_SHIFT;
 	for (; addr < end; addr += PAGE_SIZE, i++) {
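Pieced together from the two hunks above, the surviving hmm_vma_walk_hole()
reads roughly as follows after the merge; this is a reconstruction, assuming
the truncated tail of the kept body matches the deleted twin with pfns[i] = 0
in place of HMM_PFN_EMPTY:

	static int hmm_vma_walk_hole(unsigned long addr,
				     unsigned long end,
				     struct mm_walk *walk)
	{
		struct hmm_vma_walk *hmm_vma_walk = walk->private;
		struct hmm_range *range = hmm_vma_walk->range;
		uint64_t *pfns = range->pfns;
		unsigned long i;

		hmm_vma_walk->last = addr;
		i = (addr - range->start) >> PAGE_SHIFT;
		for (; addr < end; addr += PAGE_SIZE, i++) {
			pfns[i] = 0;	/* empty entry and missing directory look alike now */
			if (hmm_vma_walk->fault) {
				int ret;

				ret = hmm_vma_do_fault(walk, addr, &pfns[i]);
				if (ret != -EAGAIN)
					return ret;
			}
		}

		return hmm_vma_walk->fault ? -EAGAIN : 0;
	}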
@@ -440,10 +425,10 @@ static int hmm_vma_walk_pmd(pmd_t *pmdp,
 		if (!pmd_devmap(pmd) && !pmd_trans_huge(pmd))
 			goto again;
 		if (pmd_protnone(pmd))
-			return hmm_vma_walk_clear(start, end, walk);
+			return hmm_vma_walk_hole(start, end, walk);
 
 		if (write_fault && !pmd_write(pmd))
-			return hmm_vma_walk_clear(start, end, walk);
+			return hmm_vma_walk_hole(start, end, walk);
 
 		pfn = pmd_pfn(pmd) + pte_index(addr);
 		flag |= pmd_write(pmd) ? HMM_PFN_WRITE : 0;
@@ -462,7 +447,7 @@ static int hmm_vma_walk_pmd(pmd_t *pmdp,
 		pfns[i] = 0;
 
 		if (pte_none(pte)) {
-			pfns[i] = HMM_PFN_EMPTY;
+			pfns[i] = 0;
 			if (hmm_vma_walk->fault)
 				goto fault;
 			continue;
@@ -513,8 +498,8 @@ static int hmm_vma_walk_pmd(pmd_t *pmdp,
 
 fault:
 		pte_unmap(ptep);
-		/* Fault all pages in range */
-		return hmm_vma_walk_clear(start, end, walk);
+		/* Fault any virtual address we were asked to fault */
+		return hmm_vma_walk_hole(start, end, walk);
 	}
 	pte_unmap(ptep - 1);
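With the duplicate gone, one function serves every "nothing here" case:
walk_page_range() invokes it directly for missing page directories via the
pte_hole callback, and hmm_vma_walk_pmd() falls back to it for pmd_protnone(),
write-protected, and pte_none() entries. A minimal sketch of that wiring,
assuming the hmm_vma_fault()/hmm_vma_get_pfns() setup and struct mm_walk
layout of this kernel era (not quoted verbatim):

	struct hmm_vma_walk hmm_vma_walk = {
		.range = range,
		.fault = true,		/* ask the hole handler to fault pages in */
		.last  = range->start,
	};
	struct mm_walk mm_walk = {
		.mm        = vma->vm_mm,
		.private   = &hmm_vma_walk,
		.pmd_entry = hmm_vma_walk_pmd,	/* present pmds; falls back to the hole handler */
		.pte_hole  = hmm_vma_walk_hole,	/* no pgd/pud/pmd covering the range */
	};

	walk_page_range(range->start, range->end, &mm_walk);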