Commit ff05c0c6 authored by Jérôme Glisse, committed by Linus Torvalds

mm/hmm: use uint64_t for HMM pfn instead of defining hmm_pfn_t to ulong

All the device drivers we care about use 64-bit page table entries. To
match this, and to avoid a useless define, convert all HMM pfns to use
uint64_t directly. This is a first step on the road to letting drivers
directly use the pfn values returned by HMM (saving the memory and CPU
cycles currently spent converting between the two).

Link: http://lkml.kernel.org/r/20180323005527.758-9-jglisse@redhat.com
Signed-off-by: Jérôme Glisse <jglisse@redhat.com>
Reviewed-by: John Hubbard <jhubbard@nvidia.com>
Cc: Evgeny Baskakov <ebaskakov@nvidia.com>
Cc: Ralph Campbell <rcampbell@nvidia.com>
Cc: Mark Hairgrove <mhairgrove@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 86586a41
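
Before the diff, a short standalone sketch may help make the new encoding
concrete. It mirrors the hmm_pfn_from_pfn()/hmm_pfn_to_pfn() helpers
introduced below: the low HMM_PFN_SHIFT bits of the uint64_t carry flags,
the remaining bits carry the pfn. This is plain userspace C for
illustration only, not kernel code.

/*
 * Standalone illustration of the pfn encoding this patch converts to
 * uint64_t.  Mirrors the helpers in the diff below; compiles with any
 * C compiler, no kernel headers needed.
 */
#include <stdint.h>
#include <stdio.h>

#define HMM_PFN_VALID	(1 << 0)
#define HMM_PFN_WRITE	(1 << 1)
#define HMM_PFN_SHIFT	6	/* low bits hold the flags above */

/* Pack a raw pfn into a 64-bit HMM pfn with the valid flag set. */
static inline uint64_t hmm_pfn_from_pfn(unsigned long pfn)
{
	return ((uint64_t)pfn << HMM_PFN_SHIFT) | HMM_PFN_VALID;
}

/* Extract the raw pfn, or -1UL if the entry is not valid. */
static inline unsigned long hmm_pfn_to_pfn(uint64_t pfn)
{
	if (!(pfn & HMM_PFN_VALID))
		return -1UL;
	return (unsigned long)(pfn >> HMM_PFN_SHIFT);
}

int main(void)
{
	uint64_t entry = hmm_pfn_from_pfn(0x12345) | HMM_PFN_WRITE;

	printf("pfn=%lx writable=%d\n",
	       hmm_pfn_to_pfn(entry), !!(entry & HMM_PFN_WRITE));
	return 0;
}

Packing the flags into the same 64-bit word a device page table entry
uses is what later lets drivers consume HMM's output without a
conversion pass, as the commit message notes.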
@@ -80,8 +80,6 @@
 struct hmm;
 
 /*
- * hmm_pfn_t - HMM uses its own pfn type to keep several flags per page
- *
  * Flags:
  * HMM_PFN_VALID: pfn is valid. It has, at least, read permission.
  * HMM_PFN_WRITE: CPU page table has write permission set
@@ -93,8 +91,6 @@ struct hmm;
  * set and the pfn value is undefined.
  * HMM_PFN_DEVICE_UNADDRESSABLE: unaddressable device memory (ZONE_DEVICE)
  */
-typedef unsigned long hmm_pfn_t;
-
 #define HMM_PFN_VALID (1 << 0)
 #define HMM_PFN_WRITE (1 << 1)
 #define HMM_PFN_ERROR (1 << 2)
@@ -104,14 +100,14 @@ typedef unsigned long hmm_pfn_t;
 #define HMM_PFN_SHIFT 6
 
 /*
- * hmm_pfn_t_to_page() - return struct page pointed to by a valid hmm_pfn_t
- * @pfn: hmm_pfn_t to convert to struct page
- * Returns: struct page pointer if pfn is a valid hmm_pfn_t, NULL otherwise
+ * hmm_pfn_to_page() - return struct page pointed to by a valid HMM pfn
+ * @pfn: HMM pfn value to get corresponding struct page from
+ * Returns: struct page pointer if pfn is a valid HMM pfn, NULL otherwise
  *
- * If the hmm_pfn_t is valid (ie valid flag set) then return the struct page
- * matching the pfn value stored in the hmm_pfn_t. Otherwise return NULL.
+ * If the HMM pfn is valid (ie valid flag set) then return the struct page
+ * matching the pfn value stored in the HMM pfn. Otherwise return NULL.
  */
-static inline struct page *hmm_pfn_t_to_page(hmm_pfn_t pfn)
+static inline struct page *hmm_pfn_to_page(uint64_t pfn)
 {
 	if (!(pfn & HMM_PFN_VALID))
 		return NULL;
@@ -119,11 +115,11 @@ static inline struct page *hmm_pfn_t_to_page(hmm_pfn_t pfn)
 }
 
 /*
- * hmm_pfn_t_to_pfn() - return pfn value store in a hmm_pfn_t
- * @pfn: hmm_pfn_t to extract pfn from
- * Returns: pfn value if hmm_pfn_t is valid, -1UL otherwise
+ * hmm_pfn_to_pfn() - return pfn value store in a HMM pfn
+ * @pfn: HMM pfn value to extract pfn from
+ * Returns: pfn value if HMM pfn is valid, -1UL otherwise
  */
-static inline unsigned long hmm_pfn_t_to_pfn(hmm_pfn_t pfn)
+static inline unsigned long hmm_pfn_to_pfn(uint64_t pfn)
 {
 	if (!(pfn & HMM_PFN_VALID))
 		return -1UL;
@@ -131,21 +127,21 @@ static inline unsigned long hmm_pfn_t_to_pfn(hmm_pfn_t pfn)
 }
 
 /*
- * hmm_pfn_t_from_page() - create a valid hmm_pfn_t value from struct page
- * @page: struct page pointer for which to create the hmm_pfn_t
- * Returns: valid hmm_pfn_t for the page
+ * hmm_pfn_from_page() - create a valid HMM pfn value from struct page
+ * @page: struct page pointer for which to create the HMM pfn
+ * Returns: valid HMM pfn for the page
  */
-static inline hmm_pfn_t hmm_pfn_t_from_page(struct page *page)
+static inline uint64_t hmm_pfn_from_page(struct page *page)
 {
 	return (page_to_pfn(page) << HMM_PFN_SHIFT) | HMM_PFN_VALID;
 }
 
 /*
- * hmm_pfn_t_from_pfn() - create a valid hmm_pfn_t value from pfn
- * @pfn: pfn value for which to create the hmm_pfn_t
- * Returns: valid hmm_pfn_t for the pfn
+ * hmm_pfn_from_pfn() - create a valid HMM pfn value from pfn
+ * @pfn: pfn value for which to create the HMM pfn
+ * Returns: valid HMM pfn for the pfn
  */
-static inline hmm_pfn_t hmm_pfn_t_from_pfn(unsigned long pfn)
+static inline uint64_t hmm_pfn_from_pfn(unsigned long pfn)
 {
 	return (pfn << HMM_PFN_SHIFT) | HMM_PFN_VALID;
 }
@@ -284,7 +280,7 @@ struct hmm_range {
 	struct list_head	list;
 	unsigned long		start;
 	unsigned long		end;
-	hmm_pfn_t		*pfns;
+	uint64_t		*pfns;
 	bool			valid;
 };
@@ -307,7 +303,7 @@ bool hmm_vma_range_done(struct hmm_range *range);
 /*
  * Fault memory on behalf of device driver. Unlike handle_mm_fault(), this will
- * not migrate any device memory back to system memory. The hmm_pfn_t array will
+ * not migrate any device memory back to system memory. The HMM pfn array will
  * be updated with the fault result and current snapshot of the CPU page table
  * for the range.
  *
@@ -316,7 +312,7 @@ bool hmm_vma_range_done(struct hmm_range *range);
  * function returns -EAGAIN.
  *
  * Return value does not reflect if the fault was successful for every single
- * address or not. Therefore, the caller must to inspect the hmm_pfn_t array to
+ * address or not. Therefore, the caller must to inspect the HMM pfn array to
  * determine fault status for each address.
  *
 * Trying to fault inside an invalid vma will result in -EINVAL.
......
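
The comment above says the caller must inspect the HMM pfn array to
learn the per-address fault status. A hypothetical driver-side consumer
might look like the sketch below; kernel context (linux/hmm.h) is
assumed and every my_*() name is invented for illustration.

/* Made-up driver hook: program one page into the device page table. */
static void my_device_map_page(struct page *page, bool writable);

/*
 * Hypothetical consumer of the snapshot: walk range->pfns and decode
 * each 64-bit entry.  Not kernel code from this commit, only a sketch
 * of the usage the comment above describes.
 */
static int my_driver_consume_range(struct hmm_range *range)
{
	unsigned long i, npages = (range->end - range->start) >> PAGE_SHIFT;

	for (i = 0; i < npages; i++) {
		struct page *page;

		if (range->pfns[i] & HMM_PFN_ERROR)
			return -EFAULT;		/* fault failed for this address */

		page = hmm_pfn_to_page(range->pfns[i]);
		if (!page)
			continue;		/* entry not valid, nothing mapped */

		/* Program the device page table, honoring write permission. */
		my_device_map_page(page, range->pfns[i] & HMM_PFN_WRITE);
	}
	return 0;
}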
@@ -304,7 +304,7 @@ struct hmm_vma_walk {
 static int hmm_vma_do_fault(struct mm_walk *walk,
 			    unsigned long addr,
-			    hmm_pfn_t *pfn)
+			    uint64_t *pfn)
 {
 	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_REMOTE;
 	struct hmm_vma_walk *hmm_vma_walk = walk->private;
@@ -324,7 +324,7 @@ static int hmm_vma_do_fault(struct mm_walk *walk,
 	return -EAGAIN;
 }
 
-static void hmm_pfns_special(hmm_pfn_t *pfns,
+static void hmm_pfns_special(uint64_t *pfns,
 			     unsigned long addr,
 			     unsigned long end)
 {
@@ -338,7 +338,7 @@ static int hmm_pfns_bad(unsigned long addr,
 {
 	struct hmm_vma_walk *hmm_vma_walk = walk->private;
 	struct hmm_range *range = hmm_vma_walk->range;
-	hmm_pfn_t *pfns = range->pfns;
+	uint64_t *pfns = range->pfns;
 	unsigned long i;
 
 	i = (addr - range->start) >> PAGE_SHIFT;
@@ -348,7 +348,7 @@ static int hmm_pfns_bad(unsigned long addr,
 	return 0;
 }
 
-static void hmm_pfns_clear(hmm_pfn_t *pfns,
+static void hmm_pfns_clear(uint64_t *pfns,
 			   unsigned long addr,
 			   unsigned long end)
 {
@@ -362,7 +362,7 @@ static int hmm_vma_walk_hole(unsigned long addr,
 {
 	struct hmm_vma_walk *hmm_vma_walk = walk->private;
 	struct hmm_range *range = hmm_vma_walk->range;
-	hmm_pfn_t *pfns = range->pfns;
+	uint64_t *pfns = range->pfns;
 	unsigned long i;
 
 	hmm_vma_walk->last = addr;
@@ -387,7 +387,7 @@ static int hmm_vma_walk_clear(unsigned long addr,
 {
 	struct hmm_vma_walk *hmm_vma_walk = walk->private;
 	struct hmm_range *range = hmm_vma_walk->range;
-	hmm_pfn_t *pfns = range->pfns;
+	uint64_t *pfns = range->pfns;
 	unsigned long i;
 
 	hmm_vma_walk->last = addr;
@@ -414,7 +414,7 @@ static int hmm_vma_walk_pmd(pmd_t *pmdp,
 	struct hmm_vma_walk *hmm_vma_walk = walk->private;
 	struct hmm_range *range = hmm_vma_walk->range;
 	struct vm_area_struct *vma = walk->vma;
-	hmm_pfn_t *pfns = range->pfns;
+	uint64_t *pfns = range->pfns;
 	unsigned long addr = start, i;
 	bool write_fault;
 	pte_t *ptep;
@@ -431,7 +431,7 @@ static int hmm_vma_walk_pmd(pmd_t *pmdp,
 	if (pmd_devmap(*pmdp) || pmd_trans_huge(*pmdp)) {
 		unsigned long pfn;
-		hmm_pfn_t flag = 0;
+		uint64_t flag = 0;
 		pmd_t pmd;
 
 		/*
@@ -456,7 +456,7 @@ static int hmm_vma_walk_pmd(pmd_t *pmdp,
 		pfn = pmd_pfn(pmd) + pte_index(addr);
 		flag |= pmd_write(pmd) ? HMM_PFN_WRITE : 0;
 		for (; addr < end; addr += PAGE_SIZE, i++, pfn++)
-			pfns[i] = hmm_pfn_t_from_pfn(pfn) | flag;
+			pfns[i] = hmm_pfn_from_pfn(pfn) | flag;
 		return 0;
 	}
@@ -490,7 +490,7 @@ static int hmm_vma_walk_pmd(pmd_t *pmdp,
 		 * device and report anything else as error.
 		 */
 		if (is_device_private_entry(entry)) {
-			pfns[i] = hmm_pfn_t_from_pfn(swp_offset(entry));
+			pfns[i] = hmm_pfn_from_pfn(swp_offset(entry));
 			if (is_write_device_private_entry(entry)) {
 				pfns[i] |= HMM_PFN_WRITE;
 			} else if (write_fault)
@@ -515,7 +515,7 @@ static int hmm_vma_walk_pmd(pmd_t *pmdp,
 		if (write_fault && !pte_write(pte))
 			goto fault;
 
-		pfns[i] = hmm_pfn_t_from_pfn(pte_pfn(pte));
+		pfns[i] = hmm_pfn_from_pfn(pte_pfn(pte));
 		pfns[i] |= pte_write(pte) ? HMM_PFN_WRITE : 0;
 		continue;
@@ -678,8 +678,8 @@ EXPORT_SYMBOL(hmm_vma_range_done);
 * This is similar to a regular CPU page fault except that it will not trigger
 * any memory migration if the memory being faulted is not accessible by CPUs.
 *
- * On error, for one virtual address in the range, the function will set the
- * hmm_pfn_t error flag for the corresponding pfn entry.
+ * On error, for one virtual address in the range, the function will mark the
+ * corresponding HMM pfn entry with an error flag.
 *
 * Expected use pattern:
 * retry:
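
The "Expected use pattern: retry:" comment is truncated above. For
orientation, here is a hedged sketch of such a retry loop. The
hmm_vma_fault(range, write, block) signature and the range->vma field
are assumptions about this point in the series, and
my_driver_consume_range() is the invented helper from the earlier
sketch.

/*
 * Hedged sketch of the retry pattern referenced above.  Assumed: the
 * hmm_vma_fault(range, write, block) signature, and that struct
 * hmm_range carries its vma.  Not a verbatim copy of the kernel's
 * documented pattern.
 */
static int my_driver_fault_range(struct hmm_range *range, bool write)
{
	struct mm_struct *mm = range->vma->vm_mm;
	int ret;

retry:
	down_read(&mm->mmap_sem);
	ret = hmm_vma_fault(range, write, true);
	if (ret == -EAGAIN)
		goto retry;	/* fault path dropped mmap_sem, take it again */
	if (ret) {
		up_read(&mm->mmap_sem);
		return ret;	/* e.g. -EINVAL for an invalid vma */
	}

	/*
	 * Check that the snapshot is still valid before committing it to
	 * the device page table; if it was invalidated, start over.
	 */
	if (!hmm_vma_range_done(range)) {
		up_read(&mm->mmap_sem);
		goto retry;	/* CPU page table changed under us */
	}
	ret = my_driver_consume_range(range);	/* see earlier sketch */
	up_read(&mm->mmap_sem);
	return ret;
}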