Commit b7a2750e authored by Huacai Chen

LoongArch: Add ARCH_HAS_PTE_DEVMAP support

In order for things like get_user_pages() to work on ZONE_DEVICE memory,
we need a software PTE bit to identify device-backed PFNs.  Hook this up
along with the relevant helpers to join in with ARCH_HAS_PTE_DEVMAP.
Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
parent a0f7085f
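The diff below wires the new software bit through Kconfig, the PTE bit definitions, and the pte/pmd helpers. As a minimal illustration of what the new helpers do, here is a standalone userspace sketch of the same bit arithmetic; pte_t, _ULCAST_() and pte_val() are stand-ins for the kernel's own definitions, and bit 59 is the slot chosen in the patch:

    #include <assert.h>
    #include <stdint.h>

    typedef struct { uint64_t pte; } pte_t;   /* stand-in for the kernel's pte_t */
    #define _ULCAST_(x)        ((uint64_t)(x))
    #define _PAGE_DEVMAP_SHIFT 59
    #define _PAGE_DEVMAP       (_ULCAST_(1) << _PAGE_DEVMAP_SHIFT)

    static inline uint64_t pte_val(pte_t pte) { return pte.pte; }

    /* Same shape as the helpers added to pgtable.h below. */
    static inline int pte_devmap(pte_t pte)    { return !!(pte_val(pte) & _PAGE_DEVMAP); }
    static inline pte_t pte_mkdevmap(pte_t pte) { pte.pte |= _PAGE_DEVMAP; return pte; }

    int main(void)
    {
            pte_t pte = { 0 };

            assert(!pte_devmap(pte));     /* ordinary PTE */
            pte = pte_mkdevmap(pte);
            assert(pte_devmap(pte));      /* now identifiable as device-backed */
            return 0;
    }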
--- a/arch/loongarch/Kconfig
+++ b/arch/loongarch/Kconfig
@@ -22,6 +22,7 @@ config LOONGARCH
 	select ARCH_HAS_KERNEL_FPU_SUPPORT if CPU_HAS_FPU
 	select ARCH_HAS_NMI_SAFE_THIS_CPU_OPS
 	select ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
+	select ARCH_HAS_PTE_DEVMAP
 	select ARCH_HAS_PTE_SPECIAL
 	select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
 	select ARCH_INLINE_READ_LOCK if !PREEMPTION
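Selecting ARCH_HAS_PTE_DEVMAP is what switches the core MM from its stub implementations to the architecture's real helpers. Roughly, the generic fallbacks look like the following; this is paraphrased from the generic headers (include/linux/mm.h and include/linux/pgtable.h in kernels of this vintage), and the exact file and spelling vary by release, so treat it as a sketch rather than the literal upstream code:

    /* Without CONFIG_ARCH_HAS_PTE_DEVMAP, every PTE looks non-devmap
     * and get_user_pages() never takes the ZONE_DEVICE path. */
    #ifndef CONFIG_ARCH_HAS_PTE_DEVMAP
    static inline int pte_devmap(pte_t pte)
    {
            return 0;
    }
    #endif

    #if !defined(CONFIG_ARCH_HAS_PTE_DEVMAP) || !defined(CONFIG_TRANSPARENT_HUGEPAGE)
    static inline int pmd_devmap(pmd_t pmd)
    {
            return 0;
    }
    #endif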
--- a/arch/loongarch/include/asm/pgtable-bits.h
+++ b/arch/loongarch/include/asm/pgtable-bits.h
@@ -22,6 +22,7 @@
 #define	_PAGE_PFN_SHIFT		12
 #define	_PAGE_SWP_EXCLUSIVE_SHIFT 23
 #define	_PAGE_PFN_END_SHIFT	48
+#define	_PAGE_DEVMAP_SHIFT	59
 #define	_PAGE_PRESENT_INVALID_SHIFT 60
 #define	_PAGE_NO_READ_SHIFT	61
 #define	_PAGE_NO_EXEC_SHIFT	62
@@ -35,6 +36,7 @@
 #define	_PAGE_MODIFIED		(_ULCAST_(1) << _PAGE_MODIFIED_SHIFT)
 #define	_PAGE_PROTNONE		(_ULCAST_(1) << _PAGE_PROTNONE_SHIFT)
 #define	_PAGE_SPECIAL		(_ULCAST_(1) << _PAGE_SPECIAL_SHIFT)
+#define	_PAGE_DEVMAP		(_ULCAST_(1) << _PAGE_DEVMAP_SHIFT)
 
 /* We borrow bit 23 to store the exclusive marker in swap PTEs. */
 #define	_PAGE_SWP_EXCLUSIVE	(_ULCAST_(1) << _PAGE_SWP_EXCLUSIVE_SHIFT)
@@ -74,8 +76,8 @@
 #define __READABLE	(_PAGE_VALID)
 #define __WRITEABLE	(_PAGE_DIRTY | _PAGE_WRITE)
 
-#define _PAGE_CHG_MASK	(_PAGE_MODIFIED | _PAGE_SPECIAL | _PFN_MASK | _CACHE_MASK | _PAGE_PLV)
-#define _HPAGE_CHG_MASK	(_PAGE_MODIFIED | _PAGE_SPECIAL | _PFN_MASK | _CACHE_MASK | _PAGE_PLV | _PAGE_HUGE)
+#define _PAGE_CHG_MASK	(_PAGE_MODIFIED | _PAGE_SPECIAL | _PAGE_DEVMAP | _PFN_MASK | _CACHE_MASK | _PAGE_PLV)
+#define _HPAGE_CHG_MASK	(_PAGE_MODIFIED | _PAGE_SPECIAL | _PAGE_DEVMAP | _PFN_MASK | _CACHE_MASK | _PAGE_PLV | _PAGE_HUGE)
 
 #define PAGE_NONE	__pgprot(_PAGE_PROTNONE | _PAGE_NO_READ | \
				 _PAGE_USER | _CACHE_CC)
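Two details are worth noting in this hunk: bit 59 lies above the PFN field (bits 12 to 47) and below the existing software bits (60 to 63), so it is free for software use; and _PAGE_DEVMAP is folded into _PAGE_CHG_MASK and _HPAGE_CHG_MASK so that protection changes preserve it. A compile-time sketch of the layout check; the _PFN_MASK expression mirrors what this header defines elsewhere and is reproduced here as an assumption:

    #include <stdint.h>

    #define _ULCAST_(x)	((uint64_t)(x))

    /* Shifts from the hunk above. */
    #define _PAGE_PFN_SHIFT			12
    #define _PAGE_PFN_END_SHIFT			48
    #define _PAGE_DEVMAP_SHIFT			59
    #define _PAGE_PRESENT_INVALID_SHIFT		60

    #define _PAGE_DEVMAP	(_ULCAST_(1) << _PAGE_DEVMAP_SHIFT)
    /* PFN field covers bits [12, 48). */
    #define _PFN_MASK	(~((_ULCAST_(1) << _PAGE_PFN_SHIFT) - 1) & \
			 ((_ULCAST_(1) << _PAGE_PFN_END_SHIFT) - 1))

    _Static_assert((_PAGE_DEVMAP & _PFN_MASK) == 0,
		   "devmap bit must not alias the PFN field");
    _Static_assert(_PAGE_DEVMAP_SHIFT < _PAGE_PRESENT_INVALID_SHIFT,
		   "devmap bit must stay below the existing software bits");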
--- a/arch/loongarch/include/asm/pgtable.h
+++ b/arch/loongarch/include/asm/pgtable.h
@@ -424,6 +424,9 @@ static inline int pte_special(pte_t pte) { return pte_val(pte) & _PAGE_SPECIAL;
 static inline pte_t pte_mkspecial(pte_t pte)	{ pte_val(pte) |= _PAGE_SPECIAL; return pte; }
 #endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */
 
+static inline int pte_devmap(pte_t pte)	{ return !!(pte_val(pte) & _PAGE_DEVMAP); }
+static inline pte_t pte_mkdevmap(pte_t pte)	{ pte_val(pte) |= _PAGE_DEVMAP; return pte; }
+
 #define pte_accessible pte_accessible
 static inline unsigned long pte_accessible(struct mm_struct *mm, pte_t a)
 {
@@ -558,6 +561,17 @@ static inline pmd_t pmd_mkyoung(pmd_t pmd)
 	return pmd;
 }
 
+static inline int pmd_devmap(pmd_t pmd)
+{
+	return !!(pmd_val(pmd) & _PAGE_DEVMAP);
+}
+
+static inline pmd_t pmd_mkdevmap(pmd_t pmd)
+{
+	pmd_val(pmd) |= _PAGE_DEVMAP;
+	return pmd;
+}
+
 static inline struct page *pmd_page(pmd_t pmd)
 {
 	if (pmd_trans_huge(pmd))
@@ -613,6 +627,11 @@ static inline long pmd_protnone(pmd_t pmd)
 #define pmd_leaf(pmd)		((pmd_val(pmd) & _PAGE_HUGE) != 0)
 #define pud_leaf(pud)		((pud_val(pud) & _PAGE_HUGE) != 0)
 
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#define pud_devmap(pud)		(0)
+#define pgd_devmap(pgd)		(0)
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
 /*
  * We provide our own get_unmapped area to cope with the virtual aliasing
  * constraints placed on us by the cache architecture.
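pud_devmap() and pgd_devmap() are hardwired to 0 because this port only marks PTE- and PMD-level entries as devmap. The payoff for extending _PAGE_CHG_MASK shows up in pte_modify(): a protection change must not strip the devmap marker, or the mapping would stop being recognized as device-backed. A standalone sketch of that interaction; pte_modify() is paraphrased down to the masking that matters, while the real helper also merges cache and PLV bits:

    #include <assert.h>
    #include <stdint.h>

    #define _PAGE_DEVMAP	(UINT64_C(1) << 59)
    /* Simplified to the one bit under discussion; the real mask also keeps
     * _PAGE_MODIFIED, _PAGE_SPECIAL, _PFN_MASK, _CACHE_MASK and _PAGE_PLV. */
    #define _PAGE_CHG_MASK	_PAGE_DEVMAP

    /* Shape of pte_modify(): keep the _PAGE_CHG_MASK bits of the old PTE,
     * take everything else from the new protection value. */
    static uint64_t pte_modify(uint64_t pte, uint64_t newprot)
    {
            return (pte & _PAGE_CHG_MASK) | (newprot & ~_PAGE_CHG_MASK);
    }

    int main(void)
    {
            uint64_t pte = _PAGE_DEVMAP;	/* devmap PTE about to change protection */

            /* Without _PAGE_DEVMAP in _PAGE_CHG_MASK this would return 0. */
            assert(pte_modify(pte, 0) & _PAGE_DEVMAP);
            return 0;
    }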