Commit e5098611 authored by Martin Schwidefsky

s390/mm: cleanup page table definitions

Improve the encoding of the different pte types and the naming of the
page, segment table and region table bits. Due to the different pte
encoding the hugetlbfs primitives need to be adapted as well. To improve
compatibility with common code make the huge ptes use the encoding of
normal ptes. The conversion between the pte and pmd encoding for a huge
pte is done with set_huge_pte_at and huge_ptep_get.
Overall the code is now easier to understand.
Reviewed-by: Gerald Schaefer <gerald.schaefer@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
parent 416fd0ff
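
For orientation before reading the diff: the patch replaces the old fixed _PAGE_TYPE_* constants with independent software/hardware bits, so each pte type is decided by a plain mask test. The stand-alone sketch below is ordinary user-space C for illustration only (not part of the patch); the predicates mirror the mask tests documented in the new pgtable.h comment table, and the sample values in main() are taken from that table.

#include <stdio.h>

/* Bit values as defined by this patch (see the pgtable.h hunk below). */
#define _PAGE_PRESENT	0x001	/* SW pte present bit */
#define _PAGE_TYPE	0x002	/* SW pte type bit */
#define _PAGE_PROTECT	0x200	/* HW read-only bit */
#define _PAGE_INVALID	0x400	/* HW invalid bit */

/* The four type predicates reduce to simple mask tests. */
static int pte_present(unsigned long pte)
{
	return (pte & _PAGE_PRESENT) != 0;		/* (pte & 0x001) == 0x001 */
}

static int pte_none(unsigned long pte)
{
	return pte == _PAGE_INVALID;			/* pte == 0x400 */
}

static int pte_file(unsigned long pte)
{
	return (pte & (_PAGE_INVALID | _PAGE_PROTECT | _PAGE_PRESENT))
		== (_PAGE_INVALID | _PAGE_PROTECT);	/* (pte & 0x601) == 0x600 */
}

static int pte_swap(unsigned long pte)
{
	return (pte & (_PAGE_INVALID | _PAGE_PROTECT | _PAGE_TYPE | _PAGE_PRESENT))
		== (_PAGE_INVALID | _PAGE_TYPE);	/* (pte & 0x603) == 0x402 */
}

int main(void)
{
	/* empty, swap, file, and read-only/clean sample patterns */
	unsigned long samples[] = { 0x400, 0x402, 0x600, 0x201 };
	int i;

	for (i = 0; i < 4; i++)
		printf("%#05lx: present=%d none=%d file=%d swap=%d\n",
		       samples[i], pte_present(samples[i]),
		       pte_none(samples[i]), pte_file(samples[i]),
		       pte_swap(samples[i]));
	return 0;
}

Each sample matches exactly one predicate, which is what allows handle_pte_fault to classify a pte without taking the page table lock.
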
@@ -17,6 +17,9 @@
 void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t pte);
+pte_t huge_ptep_get(pte_t *ptep);
+pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
+			      unsigned long addr, pte_t *ptep);

 /*
  * If the arch doesn't supply something else, assume that hugepage
@@ -38,147 +41,75 @@ static inline int prepare_hugepage_range(struct file *file,
 int arch_prepare_hugepage(struct page *page);
 void arch_release_hugepage(struct page *page);

-static inline pte_t huge_pte_wrprotect(pte_t pte)
+static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
+				  pte_t *ptep)
 {
-	pte_val(pte) |= _PAGE_RO;
-	return pte;
+	pte_val(*ptep) = _SEGMENT_ENTRY_EMPTY;
 }

-static inline int huge_pte_none(pte_t pte)
+static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
+					 unsigned long address, pte_t *ptep)
 {
-	return (pte_val(pte) & _SEGMENT_ENTRY_INV) &&
-		!(pte_val(pte) & _SEGMENT_ENTRY_RO);
+	huge_ptep_get_and_clear(vma->vm_mm, address, ptep);
 }

-static inline pte_t huge_ptep_get(pte_t *ptep)
+static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
+					     unsigned long addr, pte_t *ptep,
+					     pte_t pte, int dirty)
 {
-	pte_t pte = *ptep;
-	unsigned long mask;
-
-	if (!MACHINE_HAS_HPAGE) {
-		ptep = (pte_t *) (pte_val(pte) & _SEGMENT_ENTRY_ORIGIN);
-		if (ptep) {
-			mask = pte_val(pte) &
-				(_SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO);
-			pte = pte_mkhuge(*ptep);
-			pte_val(pte) |= mask;
-		}
+	int changed = !pte_same(huge_ptep_get(ptep), pte);
+	if (changed) {
+		huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
+		set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
 	}
-	return pte;
+	return changed;
 }

-static inline void __pmd_csp(pmd_t *pmdp)
+static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
+					   unsigned long addr, pte_t *ptep)
 {
-	register unsigned long reg2 asm("2") = pmd_val(*pmdp);
-	register unsigned long reg3 asm("3") = pmd_val(*pmdp) |
-					       _SEGMENT_ENTRY_INV;
-	register unsigned long reg4 asm("4") = ((unsigned long) pmdp) + 5;
-
-	asm volatile(
-		"	csp %1,%3"
-		: "=m" (*pmdp)
-		: "d" (reg2), "d" (reg3), "d" (reg4), "m" (*pmdp) : "cc");
+	pte_t pte = huge_ptep_get_and_clear(mm, addr, ptep);
+	set_huge_pte_at(mm, addr, ptep, pte_wrprotect(pte));
 }

-static inline void huge_ptep_invalidate(struct mm_struct *mm,
-					unsigned long address, pte_t *ptep)
-{
-	pmd_t *pmdp = (pmd_t *) ptep;
-
-	if (MACHINE_HAS_IDTE)
-		__pmd_idte(address, pmdp);
-	else
-		__pmd_csp(pmdp);
-	pmd_val(*pmdp) = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY;
-}
-
-static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
-					    unsigned long addr, pte_t *ptep)
-{
-	pte_t pte = huge_ptep_get(ptep);
-
-	huge_ptep_invalidate(mm, addr, ptep);
-	return pte;
-}
-
-#define huge_ptep_set_access_flags(__vma, __addr, __ptep, __entry, __dirty) \
-({									    \
-	int __changed = !pte_same(huge_ptep_get(__ptep), __entry);	    \
-	if (__changed) {						    \
-		huge_ptep_invalidate((__vma)->vm_mm, __addr, __ptep);	    \
-		set_huge_pte_at((__vma)->vm_mm, __addr, __ptep, __entry);   \
-	}								    \
-	__changed;							    \
-})
-
-#define huge_ptep_set_wrprotect(__mm, __addr, __ptep)			\
-({									\
-	pte_t __pte = huge_ptep_get(__ptep);				\
-	if (huge_pte_write(__pte)) {					\
-		huge_ptep_invalidate(__mm, __addr, __ptep);		\
-		set_huge_pte_at(__mm, __addr, __ptep,			\
-				huge_pte_wrprotect(__pte));		\
-	}								\
-})
-
-static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
-					 unsigned long address, pte_t *ptep)
-{
-	huge_ptep_invalidate(vma->vm_mm, address, ptep);
-}
-
 static inline pte_t mk_huge_pte(struct page *page, pgprot_t pgprot)
 {
-	pte_t pte;
-	pmd_t pmd;
-
-	pmd = mk_pmd_phys(page_to_phys(page), pgprot);
-	pte_val(pte) = pmd_val(pmd);
-	return pte;
+	return mk_pte(page, pgprot);
+}
+
+static inline int huge_pte_none(pte_t pte)
+{
+	return pte_none(pte);
 }

 static inline int huge_pte_write(pte_t pte)
 {
-	pmd_t pmd;
-
-	pmd_val(pmd) = pte_val(pte);
-	return pmd_write(pmd);
+	return pte_write(pte);
 }

 static inline int huge_pte_dirty(pte_t pte)
 {
-	/* No dirty bit in the segment table entry. */
-	return 0;
+	return pte_dirty(pte);
 }

 static inline pte_t huge_pte_mkwrite(pte_t pte)
 {
-	pmd_t pmd;
-
-	pmd_val(pmd) = pte_val(pte);
-	pte_val(pte) = pmd_val(pmd_mkwrite(pmd));
-	return pte;
+	return pte_mkwrite(pte);
 }

 static inline pte_t huge_pte_mkdirty(pte_t pte)
 {
-	/* No dirty bit in the segment table entry. */
-	return pte;
+	return pte_mkdirty(pte);
+}
+
+static inline pte_t huge_pte_wrprotect(pte_t pte)
+{
+	return pte_wrprotect(pte);
 }

 static inline pte_t huge_pte_modify(pte_t pte, pgprot_t newprot)
 {
-	pmd_t pmd;
-
-	pmd_val(pmd) = pte_val(pte);
-	pte_val(pte) = pmd_val(pmd_modify(pmd, newprot));
-	return pte;
-}
-
-static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
-				  pte_t *ptep)
-{
-	pmd_clear((pmd_t *) ptep);
+	return pte_modify(pte, newprot);
 }

 #endif /* _ASM_S390_HUGETLB_H */
@@ -217,63 +217,50 @@ extern unsigned long MODULES_END;
 /* Hardware bits in the page table entry */
 #define _PAGE_CO	0x100		/* HW Change-bit override */
-#define _PAGE_RO	0x200		/* HW read-only bit  */
+#define _PAGE_PROTECT	0x200		/* HW read-only bit  */
 #define _PAGE_INVALID	0x400		/* HW invalid bit    */
+#define _PAGE_LARGE	0x800		/* Bit to mark a large pte */

 /* Software bits in the page table entry */
-#define _PAGE_SWT	0x001		/* SW pte type bit t */
-#define _PAGE_SWX	0x002		/* SW pte type bit x */
-#define _PAGE_SWC	0x004		/* SW pte changed bit */
-#define _PAGE_SWR	0x008		/* SW pte referenced bit */
-#define _PAGE_SWW	0x010		/* SW pte write bit */
+#define _PAGE_PRESENT	0x001		/* SW pte present bit */
+#define _PAGE_TYPE	0x002		/* SW pte type bit */
+#define _PAGE_YOUNG	0x004		/* SW pte young bit */
+#define _PAGE_DIRTY	0x008		/* SW pte dirty bit */
+#define _PAGE_WRITE	0x010		/* SW pte write bit */
 #define _PAGE_SPECIAL	0x020		/* SW associated with special page */
 #define __HAVE_ARCH_PTE_SPECIAL

 /* Set of bits not changed in pte_modify */
 #define _PAGE_CHG_MASK		(PAGE_MASK | _PAGE_SPECIAL | _PAGE_CO | \
-				 _PAGE_SWC | _PAGE_SWR)
-
-/* Six different types of pages. */
-#define _PAGE_TYPE_EMPTY	0x400
-#define _PAGE_TYPE_NONE		0x401
-#define _PAGE_TYPE_SWAP		0x403
-#define _PAGE_TYPE_FILE		0x601	/* bit 0x002 is used for offset !! */
-#define _PAGE_TYPE_RO		0x200
-#define _PAGE_TYPE_RW		0x000
-
-/*
- * Only four types for huge pages, using the invalid bit and protection bit
- * of a segment table entry.
- */
-#define _HPAGE_TYPE_EMPTY	0x020	/* _SEGMENT_ENTRY_INV  */
-#define _HPAGE_TYPE_NONE	0x220
-#define _HPAGE_TYPE_RO		0x200	/* _SEGMENT_ENTRY_RO  */
-#define _HPAGE_TYPE_RW		0x000
+				 _PAGE_DIRTY | _PAGE_YOUNG)

 /*
- * PTE type bits are rather complicated. handle_pte_fault uses pte_present,
- * pte_none and pte_file to find out the pte type WITHOUT holding the page
- * table lock. ptep_clear_flush on the other hand uses ptep_clear_flush to
- * invalidate a given pte. ipte sets the hw invalid bit and clears all tlbs
- * for the page. The page table entry is set to _PAGE_TYPE_EMPTY afterwards.
- * This change is done while holding the lock, but the intermediate step
- * of a previously valid pte with the hw invalid bit set can be observed by
- * handle_pte_fault. That makes it necessary that all valid pte types with
- * the hw invalid bit set must be distinguishable from the four pte types
- * empty, none, swap and file.
+ * handle_pte_fault uses pte_present, pte_none and pte_file to find out the
+ * pte type WITHOUT holding the page table lock. The _PAGE_PRESENT bit
+ * is used to distinguish present from not-present ptes. It is changed only
+ * with the page table lock held.
  *
- *			irxt  ipte  irxt
- * _PAGE_TYPE_EMPTY	1000   ->   1000
- * _PAGE_TYPE_NONE	1001   ->   1001
- * _PAGE_TYPE_SWAP	1011   ->   1011
- * _PAGE_TYPE_FILE	11?1   ->   11?1
- * _PAGE_TYPE_RO	0100   ->   1100
- * _PAGE_TYPE_RW	0000   ->   1000
+ * The following table gives the different possible bit combinations for
+ * the pte hardware and software bits in the last 12 bits of a pte:
  *
- * pte_none is true for bits combinations 1000, 1010, 1100, 1110
- * pte_present is true for bits combinations 0000, 0010, 0100, 0110, 1001
- * pte_file is true for bits combinations 1101, 1111
- * swap pte is 1011 and 0001, 0011, 0101, 0111 are invalid.
+ *				842100000000
+ *				000084210000
+ *				000000008421
+ *				.IR....wdytp
+ * empty			.10....00000
+ * swap				.10....xxx10
+ * file				.11....xxxx0
+ * prot-none, clean		.11....00x01
+ * prot-none, dirty		.10....01x01
+ * read-only, clean		.01....00x01
+ * read-only, dirty		.01....01x01
+ * read-write, clean		.01....10x01
+ * read-write, dirty		.00....11x01
+ *
+ * pte_present is true for the bit pattern .xx...xxxxx1, (pte & 0x001) == 0x001
+ * pte_none    is true for the bit pattern .10...xxxx00, (pte & 0x603) == 0x400
+ * pte_file    is true for the bit pattern .11...xxxxx0, (pte & 0x601) == 0x600
+ * pte_swap    is true for the bit pattern .10...xxxx10, (pte & 0x603) == 0x402
  */

 #ifndef CONFIG_64BIT
@@ -287,13 +274,13 @@ extern unsigned long MODULES_END;
 /* Bits in the segment table entry */
 #define _SEGMENT_ENTRY_ORIGIN	0x7fffffc0UL	/* page table origin */
-#define _SEGMENT_ENTRY_RO	0x200	/* page protection bit */
-#define _SEGMENT_ENTRY_INV	0x20	/* invalid segment table entry */
+#define _SEGMENT_ENTRY_PROTECT	0x200	/* page protection bit */
+#define _SEGMENT_ENTRY_INVALID	0x20	/* invalid segment table entry */
 #define _SEGMENT_ENTRY_COMMON	0x10	/* common segment bit */
 #define _SEGMENT_ENTRY_PTL	0x0f	/* page table length */

 #define _SEGMENT_ENTRY		(_SEGMENT_ENTRY_PTL)
-#define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INV)
+#define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INVALID)

 /* Page status table bits for virtualization */
 #define PGSTE_ACC_BITS	0xf0000000UL
@@ -324,8 +311,8 @@ extern unsigned long MODULES_END;
 /* Bits in the region table entry */
 #define _REGION_ENTRY_ORIGIN	~0xfffUL/* region/segment table origin */
-#define _REGION_ENTRY_RO	0x200	/* region protection bit */
-#define _REGION_ENTRY_INV	0x20	/* invalid region table entry */
+#define _REGION_ENTRY_PROTECT	0x200	/* region protection bit */
+#define _REGION_ENTRY_INVALID	0x20	/* invalid region table entry */
 #define _REGION_ENTRY_TYPE_MASK	0x0c	/* region/segment table type mask */
 #define _REGION_ENTRY_TYPE_R1	0x0c	/* region first table type */
 #define _REGION_ENTRY_TYPE_R2	0x08	/* region second table type */
@@ -333,11 +320,11 @@ extern unsigned long MODULES_END;
 #define _REGION_ENTRY_LENGTH	0x03	/* region third length */

 #define _REGION1_ENTRY		(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_LENGTH)
-#define _REGION1_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INV)
+#define _REGION1_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INVALID)
 #define _REGION2_ENTRY		(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_LENGTH)
-#define _REGION2_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INV)
+#define _REGION2_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INVALID)
 #define _REGION3_ENTRY		(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH)
-#define _REGION3_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INV)
+#define _REGION3_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INVALID)

 #define _REGION3_ENTRY_LARGE	0x400	/* RTTE-format control, large page */
 #define _REGION3_ENTRY_RO	0x200	/* page protection bit */
@@ -346,16 +333,17 @@ extern unsigned long MODULES_END;
 /* Bits in the segment table entry */
 #define _SEGMENT_ENTRY_ORIGIN_LARGE ~0xfffffUL /* large page address */
 #define _SEGMENT_ENTRY_ORIGIN	~0x7ffUL/* segment table origin */
-#define _SEGMENT_ENTRY_RO	0x200	/* page protection bit */
-#define _SEGMENT_ENTRY_INV	0x20	/* invalid segment table entry */
+#define _SEGMENT_ENTRY_PROTECT	0x200	/* page protection bit */
+#define _SEGMENT_ENTRY_INVALID	0x20	/* invalid segment table entry */

 #define _SEGMENT_ENTRY		(0)
-#define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INV)
+#define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INVALID)

 #define _SEGMENT_ENTRY_LARGE	0x400	/* STE-format control, large page */
 #define _SEGMENT_ENTRY_CO	0x100	/* change-recording override */
-#define _SEGMENT_ENTRY_SPLIT	0x001	/* THP splitting bit */
 #define _SEGMENT_ENTRY_SPLIT_BIT 0	/* THP splitting bit number */
+#define _SEGMENT_ENTRY_SPLIT	(1UL << _SEGMENT_ENTRY_SPLIT_BIT)

 /* Set of bits not changed in pmd_modify */
 #define _SEGMENT_CHG_MASK	(_SEGMENT_ENTRY_ORIGIN | _SEGMENT_ENTRY_LARGE \
@@ -386,14 +374,13 @@ extern unsigned long MODULES_END;
 /*
  * Page protection definitions.
  */
-#define PAGE_NONE	__pgprot(_PAGE_TYPE_NONE)
-#define PAGE_RO		__pgprot(_PAGE_TYPE_RO)
-#define PAGE_RW		__pgprot(_PAGE_TYPE_RO | _PAGE_SWW)
-#define PAGE_RWC	__pgprot(_PAGE_TYPE_RW | _PAGE_SWW | _PAGE_SWC)
+#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _PAGE_INVALID)
+#define PAGE_READ	__pgprot(_PAGE_PRESENT | _PAGE_PROTECT)
+#define PAGE_WRITE	__pgprot(_PAGE_PRESENT | _PAGE_WRITE | _PAGE_PROTECT)

-#define PAGE_KERNEL	PAGE_RWC
-#define PAGE_SHARED	PAGE_KERNEL
-#define PAGE_COPY	PAGE_RO
+#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_WRITE | _PAGE_DIRTY)
+#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | _PAGE_WRITE | _PAGE_DIRTY)
+#define PAGE_KERNEL_RO	__pgprot(_PAGE_PRESENT | _PAGE_PROTECT)

 /*
  * On s390 the page table entry has an invalid bit and a read-only bit.
@@ -402,29 +389,30 @@
  */
	 /*xwr*/
 #define __P000	PAGE_NONE
-#define __P001	PAGE_RO
-#define __P010	PAGE_RO
-#define __P011	PAGE_RO
-#define __P100	PAGE_RO
-#define __P101	PAGE_RO
-#define __P110	PAGE_RO
-#define __P111	PAGE_RO
+#define __P001	PAGE_READ
+#define __P010	PAGE_READ
+#define __P011	PAGE_READ
+#define __P100	PAGE_READ
+#define __P101	PAGE_READ
+#define __P110	PAGE_READ
+#define __P111	PAGE_READ

 #define __S000	PAGE_NONE
-#define __S001	PAGE_RO
-#define __S010	PAGE_RW
-#define __S011	PAGE_RW
-#define __S100	PAGE_RO
-#define __S101	PAGE_RO
-#define __S110	PAGE_RW
-#define __S111	PAGE_RW
+#define __S001	PAGE_READ
+#define __S010	PAGE_WRITE
+#define __S011	PAGE_WRITE
+#define __S100	PAGE_READ
+#define __S101	PAGE_READ
+#define __S110	PAGE_WRITE
+#define __S111	PAGE_WRITE

 /*
  * Segment entry (large page) protection definitions.
  */
-#define SEGMENT_NONE	__pgprot(_HPAGE_TYPE_NONE)
-#define SEGMENT_RO	__pgprot(_HPAGE_TYPE_RO)
-#define SEGMENT_RW	__pgprot(_HPAGE_TYPE_RW)
+#define SEGMENT_NONE	__pgprot(_SEGMENT_ENTRY_INVALID | \
+				 _SEGMENT_ENTRY_PROTECT)
+#define SEGMENT_READ	__pgprot(_SEGMENT_ENTRY_PROTECT)
+#define SEGMENT_WRITE	__pgprot(0)

 static inline int mm_exclusive(struct mm_struct *mm)
 {
@@ -467,7 +455,7 @@ static inline int pgd_none(pgd_t pgd)
 {
 	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
 		return 0;
-	return (pgd_val(pgd) & _REGION_ENTRY_INV) != 0UL;
+	return (pgd_val(pgd) & _REGION_ENTRY_INVALID) != 0UL;
 }

 static inline int pgd_bad(pgd_t pgd)
@@ -478,7 +466,7 @@ static inline int pgd_bad(pgd_t pgd)
	 * invalid for either table entry.
	 */
 	unsigned long mask =
-		~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INV &
+		~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INVALID &
 		~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;
 	return (pgd_val(pgd) & mask) != 0;
 }
@@ -494,7 +482,7 @@ static inline int pud_none(pud_t pud)
 {
 	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
 		return 0;
-	return (pud_val(pud) & _REGION_ENTRY_INV) != 0UL;
+	return (pud_val(pud) & _REGION_ENTRY_INVALID) != 0UL;
 }

 static inline int pud_large(pud_t pud)
@@ -512,7 +500,7 @@ static inline int pud_bad(pud_t pud)
	 * invalid for either table entry.
	 */
 	unsigned long mask =
-		~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INV &
+		~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INVALID &
 		~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;
 	return (pud_val(pud) & mask) != 0;
 }
@@ -521,21 +509,18 @@ static inline int pud_bad(pud_t pud)
 static inline int pmd_present(pmd_t pmd)
 {
-	unsigned long mask = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO;
-	return (pmd_val(pmd) & mask) == _HPAGE_TYPE_NONE ||
-	       !(pmd_val(pmd) & _SEGMENT_ENTRY_INV);
+	return pmd_val(pmd) != _SEGMENT_ENTRY_INVALID;
 }

 static inline int pmd_none(pmd_t pmd)
 {
-	return (pmd_val(pmd) & _SEGMENT_ENTRY_INV) &&
-	       !(pmd_val(pmd) & _SEGMENT_ENTRY_RO);
+	return pmd_val(pmd) == _SEGMENT_ENTRY_INVALID;
 }

 static inline int pmd_large(pmd_t pmd)
 {
 #ifdef CONFIG_64BIT
-	return !!(pmd_val(pmd) & _SEGMENT_ENTRY_LARGE);
+	return (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) != 0;
 #else
 	return 0;
 #endif
@@ -543,7 +528,7 @@ static inline int pmd_large(pmd_t pmd)
 static inline int pmd_bad(pmd_t pmd)
 {
-	unsigned long mask = ~_SEGMENT_ENTRY_ORIGIN & ~_SEGMENT_ENTRY_INV;
+	unsigned long mask = ~_SEGMENT_ENTRY_ORIGIN & ~_SEGMENT_ENTRY_INVALID;
 	return (pmd_val(pmd) & mask) != _SEGMENT_ENTRY;
 }
@@ -563,7 +548,7 @@ extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
 #define __HAVE_ARCH_PMD_WRITE
 static inline int pmd_write(pmd_t pmd)
 {
-	return (pmd_val(pmd) & _SEGMENT_ENTRY_RO) == 0;
+	return (pmd_val(pmd) & _SEGMENT_ENTRY_PROTECT) == 0;
 }

 static inline int pmd_young(pmd_t pmd)
@@ -571,23 +556,23 @@ static inline int pmd_young(pmd_t pmd)
 	return 0;
 }

-static inline int pte_none(pte_t pte)
+static inline int pte_present(pte_t pte)
 {
-	return (pte_val(pte) & _PAGE_INVALID) && !(pte_val(pte) & _PAGE_SWT);
+	/* Bit pattern: (pte & 0x001) == 0x001 */
+	return (pte_val(pte) & _PAGE_PRESENT) != 0;
 }

-static inline int pte_present(pte_t pte)
+static inline int pte_none(pte_t pte)
 {
-	unsigned long mask = _PAGE_RO | _PAGE_INVALID | _PAGE_SWT | _PAGE_SWX;
-	return (pte_val(pte) & mask) == _PAGE_TYPE_NONE ||
-		(!(pte_val(pte) & _PAGE_INVALID) &&
-		 !(pte_val(pte) & _PAGE_SWT));
+	/* Bit pattern: pte == 0x400 */
+	return pte_val(pte) == _PAGE_INVALID;
 }

 static inline int pte_file(pte_t pte)
 {
-	unsigned long mask = _PAGE_RO | _PAGE_INVALID | _PAGE_SWT;
-	return (pte_val(pte) & mask) == _PAGE_TYPE_FILE;
+	/* Bit pattern: (pte & 0x601) == 0x600 */
+	return (pte_val(pte) & (_PAGE_INVALID | _PAGE_PROTECT | _PAGE_PRESENT))
+		== (_PAGE_INVALID | _PAGE_PROTECT);
 }

 static inline int pte_special(pte_t pte)
@@ -695,7 +680,7 @@ static inline pgste_t pgste_update_young(pte_t *ptep, pgste_t pgste)
	/* Transfer referenced bit to kvm user bits and pte */
 	if (young) {
 		pgste_val(pgste) |= PGSTE_UR_BIT;
-		pte_val(*ptep) |= _PAGE_SWR;
+		pte_val(*ptep) |= _PAGE_YOUNG;
 	}
 #endif
 	return pgste;
@@ -723,13 +708,13 @@ static inline void pgste_set_key(pte_t *ptep, pgste_t pgste, pte_t entry)
 static inline void pgste_set_pte(pte_t *ptep, pte_t entry)
 {
-	if (!MACHINE_HAS_ESOP && (pte_val(entry) & _PAGE_SWW)) {
+	if (!MACHINE_HAS_ESOP && (pte_val(entry) & _PAGE_WRITE)) {
 		/*
		 * Without enhanced suppression-on-protection force
		 * the dirty bit on for all writable ptes.
		 */
-		pte_val(entry) |= _PAGE_SWC;
-		pte_val(entry) &= ~_PAGE_RO;
+		pte_val(entry) |= _PAGE_DIRTY;
+		pte_val(entry) &= ~_PAGE_PROTECT;
 	}
 	*ptep = entry;
 }
@@ -841,18 +826,18 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
  */
 static inline int pte_write(pte_t pte)
 {
-	return (pte_val(pte) & _PAGE_SWW) != 0;
+	return (pte_val(pte) & _PAGE_WRITE) != 0;
 }

 static inline int pte_dirty(pte_t pte)
 {
-	return (pte_val(pte) & _PAGE_SWC) != 0;
+	return (pte_val(pte) & _PAGE_DIRTY) != 0;
 }

 static inline int pte_young(pte_t pte)
 {
 #ifdef CONFIG_PGSTE
-	if (pte_val(pte) & _PAGE_SWR)
+	if (pte_val(pte) & _PAGE_YOUNG)
 		return 1;
 #endif
 	return 0;
@@ -880,12 +865,12 @@ static inline void pud_clear(pud_t *pud)
 static inline void pmd_clear(pmd_t *pmdp)
 {
-	pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
+	pmd_val(*pmdp) = _SEGMENT_ENTRY_INVALID;
 }

 static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
-	pte_val(*ptep) = _PAGE_TYPE_EMPTY;
+	pte_val(*ptep) = _PAGE_INVALID;
 }

 /*
@@ -896,49 +881,45 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 {
 	pte_val(pte) &= _PAGE_CHG_MASK;
 	pte_val(pte) |= pgprot_val(newprot);
-	if ((pte_val(pte) & _PAGE_SWC) && (pte_val(pte) & _PAGE_SWW))
-		pte_val(pte) &= ~_PAGE_RO;
+	if ((pte_val(pte) & _PAGE_DIRTY) && (pte_val(pte) & _PAGE_WRITE))
+		pte_val(pte) &= ~_PAGE_PROTECT;
 	return pte;
 }

 static inline pte_t pte_wrprotect(pte_t pte)
 {
-	pte_val(pte) &= ~_PAGE_SWW;
-	/* Do not clobber _PAGE_TYPE_NONE pages!  */
-	if (!(pte_val(pte) & _PAGE_INVALID))
-		pte_val(pte) |= _PAGE_RO;
+	pte_val(pte) &= ~_PAGE_WRITE;
+	pte_val(pte) |= _PAGE_PROTECT;
 	return pte;
 }

 static inline pte_t pte_mkwrite(pte_t pte)
 {
-	pte_val(pte) |= _PAGE_SWW;
-	if (pte_val(pte) & _PAGE_SWC)
-		pte_val(pte) &= ~_PAGE_RO;
+	pte_val(pte) |= _PAGE_WRITE;
+	if (pte_val(pte) & _PAGE_DIRTY)
+		pte_val(pte) &= ~_PAGE_PROTECT;
 	return pte;
 }

 static inline pte_t pte_mkclean(pte_t pte)
 {
-	pte_val(pte) &= ~_PAGE_SWC;
-	/* Do not clobber _PAGE_TYPE_NONE pages!  */
-	if (!(pte_val(pte) & _PAGE_INVALID))
-		pte_val(pte) |= _PAGE_RO;
+	pte_val(pte) &= ~_PAGE_DIRTY;
+	pte_val(pte) |= _PAGE_PROTECT;
 	return pte;
 }

 static inline pte_t pte_mkdirty(pte_t pte)
 {
-	pte_val(pte) |= _PAGE_SWC;
-	if (pte_val(pte) & _PAGE_SWW)
-		pte_val(pte) &= ~_PAGE_RO;
+	pte_val(pte) |= _PAGE_DIRTY;
+	if (pte_val(pte) & _PAGE_WRITE)
+		pte_val(pte) &= ~_PAGE_PROTECT;
 	return pte;
 }

 static inline pte_t pte_mkold(pte_t pte)
 {
 #ifdef CONFIG_PGSTE
-	pte_val(pte) &= ~_PAGE_SWR;
+	pte_val(pte) &= ~_PAGE_YOUNG;
 #endif
 	return pte;
 }
@@ -957,7 +938,7 @@ static inline pte_t pte_mkspecial(pte_t pte)
 #ifdef CONFIG_HUGETLB_PAGE
 static inline pte_t pte_mkhuge(pte_t pte)
 {
-	pte_val(pte) |= (_SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_CO);
+	pte_val(pte) |= _PAGE_LARGE;
 	return pte;
 }
 #endif
@@ -1076,7 +1057,7 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
 	pte = *ptep;
 	if (!mm_exclusive(mm))
 		__ptep_ipte(address, ptep);
-	pte_val(*ptep) = _PAGE_TYPE_EMPTY;
+	pte_val(*ptep) = _PAGE_INVALID;

 	if (mm_has_pgste(mm)) {
 		pgste = pgste_update_all(&pte, pgste);
@@ -1139,7 +1120,7 @@ static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
 	pte = *ptep;
 	__ptep_ipte(address, ptep);
-	pte_val(*ptep) = _PAGE_TYPE_EMPTY;
+	pte_val(*ptep) = _PAGE_INVALID;

 	if (mm_has_pgste(vma->vm_mm)) {
 		pgste = pgste_update_all(&pte, pgste);
@@ -1172,7 +1153,7 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
 	pte = *ptep;
 	if (!full)
 		__ptep_ipte(address, ptep);
-	pte_val(*ptep) = _PAGE_TYPE_EMPTY;
+	pte_val(*ptep) = _PAGE_INVALID;

 	if (mm_has_pgste(mm)) {
 		pgste = pgste_update_all(&pte, pgste);
@@ -1248,10 +1229,8 @@ static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
 	unsigned long physpage = page_to_phys(page);
 	pte_t __pte = mk_pte_phys(physpage, pgprot);

-	if ((pte_val(__pte) & _PAGE_SWW) && PageDirty(page)) {
-		pte_val(__pte) |= _PAGE_SWC;
-		pte_val(__pte) &= ~_PAGE_RO;
-	}
+	if (pte_write(__pte) && PageDirty(page))
+		__pte = pte_mkdirty(__pte);
 	return __pte;
 }
@@ -1313,7 +1292,7 @@ static inline void __pmd_idte(unsigned long address, pmd_t *pmdp)
 	unsigned long sto = (unsigned long) pmdp -
			    pmd_index(address) * sizeof(pmd_t);

-	if (!(pmd_val(*pmdp) & _SEGMENT_ENTRY_INV)) {
+	if (!(pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID)) {
 		asm volatile(
 			"	.insn	rrf,0xb98e0000,%2,%3,0,0"
 			: "=m" (*pmdp)
@@ -1324,18 +1303,31 @@ static inline void __pmd_idte(unsigned long address, pmd_t *pmdp)
 	}
 }

+static inline void __pmd_csp(pmd_t *pmdp)
+{
+	register unsigned long reg2 asm("2") = pmd_val(*pmdp);
+	register unsigned long reg3 asm("3") = pmd_val(*pmdp) |
+					       _SEGMENT_ENTRY_INVALID;
+	register unsigned long reg4 asm("4") = ((unsigned long) pmdp) + 5;
+
+	asm volatile(
+		"	csp %1,%3"
+		: "=m" (*pmdp)
+		: "d" (reg2), "d" (reg3), "d" (reg4), "m" (*pmdp) : "cc");
+}
+
 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)
 static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot)
 {
 	/*
-	 * pgprot is PAGE_NONE, PAGE_RO, or PAGE_RW (see __Pxxx / __Sxxx)
+	 * pgprot is PAGE_NONE, PAGE_READ, or PAGE_WRITE (see __Pxxx / __Sxxx)
	 * Convert to segment table entry format.
	 */
 	if (pgprot_val(pgprot) == pgprot_val(PAGE_NONE))
 		return pgprot_val(SEGMENT_NONE);
-	if (pgprot_val(pgprot) == pgprot_val(PAGE_RO))
-		return pgprot_val(SEGMENT_RO);
-	return pgprot_val(SEGMENT_RW);
+	if (pgprot_val(pgprot) == pgprot_val(PAGE_READ))
+		return pgprot_val(SEGMENT_READ);
+	return pgprot_val(SEGMENT_WRITE);
 }

 static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
@@ -1354,9 +1346,9 @@ static inline pmd_t mk_pmd_phys(unsigned long physpage, pgprot_t pgprot)
 static inline pmd_t pmd_mkwrite(pmd_t pmd)
 {
-	/* Do not clobber _HPAGE_TYPE_NONE pages! */
-	if (!(pmd_val(pmd) & _SEGMENT_ENTRY_INV))
-		pmd_val(pmd) &= ~_SEGMENT_ENTRY_RO;
+	/* Do not clobber PROT_NONE pages! */
+	if (!(pmd_val(pmd) & _SEGMENT_ENTRY_INVALID))
+		pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
 	return pmd;
 }
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLB_PAGE */
@@ -1378,7 +1370,7 @@ static inline int pmd_trans_splitting(pmd_t pmd)
 static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
			      pmd_t *pmdp, pmd_t entry)
 {
-	if (!(pmd_val(entry) & _SEGMENT_ENTRY_INV) && MACHINE_HAS_EDAT1)
+	if (!(pmd_val(entry) & _SEGMENT_ENTRY_INVALID) && MACHINE_HAS_EDAT1)
 		pmd_val(entry) |= _SEGMENT_ENTRY_CO;
 	*pmdp = entry;
 }
@@ -1391,7 +1383,7 @@ static inline pmd_t pmd_mkhuge(pmd_t pmd)
 static inline pmd_t pmd_wrprotect(pmd_t pmd)
 {
-	pmd_val(pmd) |= _SEGMENT_ENTRY_RO;
+	pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
 	return pmd;
 }
@@ -1510,10 +1502,8 @@ static inline unsigned long pmd_pfn(pmd_t pmd)
  * exception will occur instead of a page translation exception. The
  * specifiation exception has the bad habit not to store necessary
  * information in the lowcore.
- * Bit 21 and bit 22 are the page invalid bit and the page protection
- * bit. We set both to indicate a swapped page.
- * Bit 30 and 31 are used to distinguish the different page types. For
- * a swapped page these bits need to be zero.
+ * Bits 21, 22, 30 and 31 are used to indicate the page type.
+ * A swap pte is indicated by bit pattern (pte & 0x603) == 0x402
  * This leaves the bits 1-19 and bits 24-29 to store type and offset.
  * We use the 5 bits from 25-29 for the type and the 20 bits from 1-19
  * plus 24 for the offset.
@@ -1527,10 +1517,8 @@ static inline unsigned long pmd_pfn(pmd_t pmd)
  * exception will occur instead of a page translation exception. The
  * specifiation exception has the bad habit not to store necessary
  * information in the lowcore.
- * Bit 53 and bit 54 are the page invalid bit and the page protection
- * bit. We set both to indicate a swapped page.
- * Bit 62 and 63 are used to distinguish the different page types. For
- * a swapped page these bits need to be zero.
+ * Bits 53, 54, 62 and 63 are used to indicate the page type.
+ * A swap pte is indicated by bit pattern (pte & 0x603) == 0x402
  * This leaves the bits 0-51 and bits 56-61 to store type and offset.
  * We use the 5 bits from 57-61 for the type and the 53 bits from 0-51
  * plus 56 for the offset.
@@ -1547,7 +1535,7 @@ static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
 {
 	pte_t pte;
 	offset &= __SWP_OFFSET_MASK;
-	pte_val(pte) = _PAGE_TYPE_SWAP | ((type & 0x1f) << 2) |
+	pte_val(pte) = _PAGE_INVALID | _PAGE_TYPE | ((type & 0x1f) << 2) |
 		((offset & 1UL) << 7) | ((offset & ~1UL) << 11);
 	return pte;
 }
@@ -1570,7 +1558,7 @@ static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
 #define pgoff_to_pte(__off) \
	((pte_t) { ((((__off) & 0x7f) << 1) + (((__off) >> 7) << 12)) \
-		   | _PAGE_TYPE_FILE })
+		   | _PAGE_INVALID | _PAGE_PROTECT })

 #endif /* !__ASSEMBLY__ */
...
@@ -113,11 +113,11 @@ int vdso_alloc_per_cpu(struct _lowcore *lowcore)

 	clear_table((unsigned long *) segment_table, _SEGMENT_ENTRY_EMPTY,
		    PAGE_SIZE << SEGMENT_ORDER);
-	clear_table((unsigned long *) page_table, _PAGE_TYPE_EMPTY,
+	clear_table((unsigned long *) page_table, _PAGE_INVALID,
		    256*sizeof(unsigned long));

 	*(unsigned long *) segment_table = _SEGMENT_ENTRY + page_table;
-	*(unsigned long *) page_table = _PAGE_RO + page_frame;
+	*(unsigned long *) page_table = _PAGE_PROTECT + page_frame;

 	psal = (u32 *) (page_table + 256*sizeof(unsigned long));
 	aste = psal + 32;
...
@@ -86,28 +86,28 @@ static unsigned long follow_table(struct mm_struct *mm,
 	switch (mm->context.asce_bits & _ASCE_TYPE_MASK) {
 	case _ASCE_TYPE_REGION1:
 		table = table + ((address >> 53) & 0x7ff);
-		if (unlikely(*table & _REGION_ENTRY_INV))
+		if (unlikely(*table & _REGION_ENTRY_INVALID))
 			return -0x39UL;
 		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
 		/* fallthrough */
 	case _ASCE_TYPE_REGION2:
 		table = table + ((address >> 42) & 0x7ff);
-		if (unlikely(*table & _REGION_ENTRY_INV))
+		if (unlikely(*table & _REGION_ENTRY_INVALID))
 			return -0x3aUL;
 		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
 		/* fallthrough */
 	case _ASCE_TYPE_REGION3:
 		table = table + ((address >> 31) & 0x7ff);
-		if (unlikely(*table & _REGION_ENTRY_INV))
+		if (unlikely(*table & _REGION_ENTRY_INVALID))
 			return -0x3bUL;
 		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
 		/* fallthrough */
 	case _ASCE_TYPE_SEGMENT:
 		table = table + ((address >> 20) & 0x7ff);
-		if (unlikely(*table & _SEGMENT_ENTRY_INV))
+		if (unlikely(*table & _SEGMENT_ENTRY_INVALID))
 			return -0x10UL;
 		if (unlikely(*table & _SEGMENT_ENTRY_LARGE)) {
-			if (write && (*table & _SEGMENT_ENTRY_RO))
+			if (write && (*table & _SEGMENT_ENTRY_PROTECT))
 				return -0x04UL;
 			return (*table & _SEGMENT_ENTRY_ORIGIN_LARGE) +
 				(address & ~_SEGMENT_ENTRY_ORIGIN_LARGE);
@@ -117,7 +117,7 @@ static unsigned long follow_table(struct mm_struct *mm,
 	table = table + ((address >> 12) & 0xff);
 	if (unlikely(*table & _PAGE_INVALID))
 		return -0x11UL;
-	if (write && (*table & _PAGE_RO))
+	if (write && (*table & _PAGE_PROTECT))
 		return -0x04UL;
 	return (*table & PAGE_MASK) + (address & ~PAGE_MASK);
 }
@@ -130,13 +130,13 @@ static unsigned long follow_table(struct mm_struct *mm,
 	unsigned long *table = (unsigned long *)__pa(mm->pgd);

 	table = table + ((address >> 20) & 0x7ff);
-	if (unlikely(*table & _SEGMENT_ENTRY_INV))
+	if (unlikely(*table & _SEGMENT_ENTRY_INVALID))
 		return -0x10UL;
 	table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
 	table = table + ((address >> 12) & 0xff);
 	if (unlikely(*table & _PAGE_INVALID))
 		return -0x11UL;
-	if (write && (*table & _PAGE_RO))
+	if (write && (*table & _PAGE_PROTECT))
 		return -0x04UL;
 	return (*table & PAGE_MASK) + (address & ~PAGE_MASK);
 }
...
@@ -53,7 +53,7 @@ static void print_prot(struct seq_file *m, unsigned int pr, int level)
 		seq_printf(m, "I\n");
 		return;
 	}
-	seq_printf(m, "%s", pr & _PAGE_RO ? "RO " : "RW ");
+	seq_printf(m, "%s", pr & _PAGE_PROTECT ? "RO " : "RW ");
 	seq_printf(m, "%s", pr & _PAGE_CO ? "CO " : "   ");
 	seq_putc(m, '\n');
 }
@@ -105,12 +105,12 @@ static void note_page(struct seq_file *m, struct pg_state *st,
 }

 /*
- * The actual page table walker functions. In order to keep the implementation
- * of print_prot() short, we only check and pass _PAGE_INVALID and _PAGE_RO
- * flags to note_page() if a region, segment or page table entry is invalid or
- * read-only.
- * After all it's just a hint that the current level being walked contains an
- * invalid or read-only entry.
+ * The actual page table walker functions. In order to keep the
+ * implementation of print_prot() short, we only check and pass
+ * _PAGE_INVALID and _PAGE_PROTECT flags to note_page() if a region,
+ * segment or page table entry is invalid or read-only.
+ * After all it's just a hint that the current level being walked
+ * contains an invalid or read-only entry.
  */
 static void walk_pte_level(struct seq_file *m, struct pg_state *st,
			   pmd_t *pmd, unsigned long addr)
@@ -122,14 +122,14 @@ static void walk_pte_level(struct seq_file *m, struct pg_state *st,
 	for (i = 0; i < PTRS_PER_PTE && addr < max_addr; i++) {
 		st->current_address = addr;
 		pte = pte_offset_kernel(pmd, addr);
-		prot = pte_val(*pte) & (_PAGE_RO | _PAGE_INVALID);
+		prot = pte_val(*pte) & (_PAGE_PROTECT | _PAGE_INVALID);
 		note_page(m, st, prot, 4);
 		addr += PAGE_SIZE;
 	}
 }

 #ifdef CONFIG_64BIT
-#define _PMD_PROT_MASK (_SEGMENT_ENTRY_RO | _SEGMENT_ENTRY_CO)
+#define _PMD_PROT_MASK (_SEGMENT_ENTRY_PROTECT | _SEGMENT_ENTRY_CO)
 #else
 #define _PMD_PROT_MASK 0
 #endif
...
@@ -24,7 +24,7 @@ static inline int gup_pte_range(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
 	pte_t *ptep, pte;
 	struct page *page;

-	mask = (write ? _PAGE_RO : 0) | _PAGE_INVALID | _PAGE_SPECIAL;
+	mask = (write ? _PAGE_PROTECT : 0) | _PAGE_INVALID | _PAGE_SPECIAL;

 	ptep = ((pte_t *) pmd_deref(pmd)) + pte_index(addr);
 	do {
@@ -55,8 +55,8 @@ static inline int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
 	struct page *head, *page, *tail;
 	int refs;

-	result = write ? 0 : _SEGMENT_ENTRY_RO;
-	mask = result | _SEGMENT_ENTRY_INV;
+	result = write ? 0 : _SEGMENT_ENTRY_PROTECT;
+	mask = result | _SEGMENT_ENTRY_INVALID;
 	if ((pmd_val(pmd) & mask) != result)
 		return 0;
 	VM_BUG_ON(!pfn_valid(pmd_val(pmd) >> PAGE_SHIFT));
...
@@ -8,21 +8,107 @@
 #include <linux/mm.h>
 #include <linux/hugetlb.h>

+static inline pmd_t __pte_to_pmd(pte_t pte)
+{
+	int none, prot;
+	pmd_t pmd;
+
+	/*
+	 * Convert encoding		pte bits	pmd bits
+	 *				.IR.....wdtp	..R...I.....
+	 * empty			.10.....0000 -> ..0...1.....
+	 * prot-none, clean		.11.....0001 -> ..1...1.....
+	 * prot-none, dirty		.10.....0101 -> ..1...1.....
+	 * read-only, clean		.01.....0001 -> ..1...0.....
+	 * read-only, dirty		.01.....0101 -> ..1...0.....
+	 * read-write, clean		.01.....1001 -> ..0...0.....
+	 * read-write, dirty		.00.....1101 -> ..0...0.....
+	 * Huge ptes are dirty by definition, a clean pte is made dirty
+	 * by the conversion.
+	 */
+	if (pte_present(pte)) {
+		pmd_val(pmd) = pte_val(pte) & PAGE_MASK;
+		if (pte_val(pte) & _PAGE_INVALID)
+			pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
+		none = (pte_val(pte) & _PAGE_PRESENT) &&
+			(pte_val(pte) & _PAGE_INVALID);
+		prot = (pte_val(pte) & _PAGE_PROTECT);
+		if (prot || none)
+			pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
+	} else
+		pmd_val(pmd) = _SEGMENT_ENTRY_INVALID;
+	return pmd;
+}
+
+static inline pte_t __pmd_to_pte(pmd_t pmd)
+{
+	pte_t pte;
+
+	/*
+	 * Convert encoding		pmd bits	pte bits
+	 *				..R...I.....	.IR.....wdtp
+	 * empty			..0...1..... -> .10.....0000
+	 * prot-none, young		..1...1..... -> .10.....0101
+	 * read-only, young		..1...0..... -> .01.....0101
+	 * read-write, young		..0...0..... -> .00.....1101
+	 * Huge ptes are dirty by definition
+	 */
+	if (pmd_present(pmd)) {
+		pte_val(pte) = _PAGE_PRESENT | _PAGE_LARGE | _PAGE_DIRTY |
+			(pmd_val(pmd) & PAGE_MASK);
+		if (pmd_val(pmd) & _SEGMENT_ENTRY_INVALID)
+			pte_val(pte) |= _PAGE_INVALID;
+		else {
+			if (pmd_val(pmd) & _SEGMENT_ENTRY_PROTECT)
+				pte_val(pte) |= _PAGE_PROTECT;
+			else
+				pte_val(pte) |= _PAGE_WRITE;
+		}
+	} else
+		pte_val(pte) = _PAGE_INVALID;
+	return pte;
+}
+
 void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
-		     pte_t *pteptr, pte_t pteval)
+		     pte_t *ptep, pte_t pte)
 {
-	pmd_t *pmdp = (pmd_t *) pteptr;
-	unsigned long mask;
+	pmd_t pmd;

+	pmd = __pte_to_pmd(pte);
 	if (!MACHINE_HAS_HPAGE) {
-		pteptr = (pte_t *) pte_page(pteval)[1].index;
-		mask = pte_val(pteval) &
-			(_SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO);
-		pte_val(pteval) = (_SEGMENT_ENTRY + __pa(pteptr)) | mask;
-	}
+		pmd_val(pmd) &= ~_SEGMENT_ENTRY_ORIGIN;
+		pmd_val(pmd) |= pte_page(pte)[1].index;
+	} else
+		pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_CO;
+	*(pmd_t *) ptep = pmd;
+}
+
+pte_t huge_ptep_get(pte_t *ptep)
+{
+	unsigned long origin;
+	pmd_t pmd;
+
+	pmd = *(pmd_t *) ptep;
+	if (!MACHINE_HAS_HPAGE && pmd_present(pmd)) {
+		origin = pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN;
+		pmd_val(pmd) &= ~_SEGMENT_ENTRY_ORIGIN;
+		pmd_val(pmd) |= *(unsigned long *) origin;
+	}
+	return __pmd_to_pte(pmd);
+}

-	pmd_val(*pmdp) = pte_val(pteval);
+pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
+			      unsigned long addr, pte_t *ptep)
+{
+	pmd_t *pmdp = (pmd_t *) ptep;
+	pte_t pte = huge_ptep_get(ptep);
+
+	if (MACHINE_HAS_IDTE)
+		__pmd_idte(addr, pmdp);
+	else
+		__pmd_csp(pmdp);
+	pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
+	return pte;
 }

 int arch_prepare_hugepage(struct page *page)
@@ -58,7 +144,7 @@ void arch_release_hugepage(struct page *page)
 	ptep = (pte_t *) page[1].index;
 	if (!ptep)
 		return;
-	clear_table((unsigned long *) ptep, _PAGE_TYPE_EMPTY,
+	clear_table((unsigned long *) ptep, _PAGE_INVALID,
		    PTRS_PER_PTE * sizeof(pte_t));
 	page_table_free(&init_mm, (unsigned long *) ptep);
 	page[1].index = 0;
...
@@ -118,7 +118,7 @@ void kernel_map_pages(struct page *page, int numpages, int enable)
 		pte = pte_offset_kernel(pmd, address);
 		if (!enable) {
 			__ptep_ipte(address, pte);
-			pte_val(*pte) = _PAGE_TYPE_EMPTY;
+			pte_val(*pte) = _PAGE_INVALID;
 			continue;
 		}
 		pte_val(*pte) = __pa(address);
...
...@@ -161,7 +161,7 @@ static int gmap_unlink_segment(struct gmap *gmap, unsigned long *table) ...@@ -161,7 +161,7 @@ static int gmap_unlink_segment(struct gmap *gmap, unsigned long *table)
struct gmap_rmap *rmap; struct gmap_rmap *rmap;
struct page *page; struct page *page;
if (*table & _SEGMENT_ENTRY_INV) if (*table & _SEGMENT_ENTRY_INVALID)
return 0; return 0;
page = pfn_to_page(*table >> PAGE_SHIFT); page = pfn_to_page(*table >> PAGE_SHIFT);
mp = (struct gmap_pgtable *) page->index; mp = (struct gmap_pgtable *) page->index;
...@@ -172,7 +172,7 @@ static int gmap_unlink_segment(struct gmap *gmap, unsigned long *table) ...@@ -172,7 +172,7 @@ static int gmap_unlink_segment(struct gmap *gmap, unsigned long *table)
kfree(rmap); kfree(rmap);
break; break;
} }
*table = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | mp->vmaddr; *table = mp->vmaddr | _SEGMENT_ENTRY_INVALID | _SEGMENT_ENTRY_PROTECT;
return 1; return 1;
} }
...@@ -258,7 +258,7 @@ static int gmap_alloc_table(struct gmap *gmap, ...@@ -258,7 +258,7 @@ static int gmap_alloc_table(struct gmap *gmap,
return -ENOMEM; return -ENOMEM;
new = (unsigned long *) page_to_phys(page); new = (unsigned long *) page_to_phys(page);
crst_table_init(new, init); crst_table_init(new, init);
if (*table & _REGION_ENTRY_INV) { if (*table & _REGION_ENTRY_INVALID) {
list_add(&page->lru, &gmap->crst_list); list_add(&page->lru, &gmap->crst_list);
*table = (unsigned long) new | _REGION_ENTRY_LENGTH | *table = (unsigned long) new | _REGION_ENTRY_LENGTH |
(*table & _REGION_ENTRY_TYPE_MASK); (*table & _REGION_ENTRY_TYPE_MASK);
...@@ -292,22 +292,22 @@ int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len) ...@@ -292,22 +292,22 @@ int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len)
for (off = 0; off < len; off += PMD_SIZE) { for (off = 0; off < len; off += PMD_SIZE) {
/* Walk the guest addr space page table */ /* Walk the guest addr space page table */
table = gmap->table + (((to + off) >> 53) & 0x7ff); table = gmap->table + (((to + off) >> 53) & 0x7ff);
if (*table & _REGION_ENTRY_INV) if (*table & _REGION_ENTRY_INVALID)
goto out; goto out;
table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
table = table + (((to + off) >> 42) & 0x7ff); table = table + (((to + off) >> 42) & 0x7ff);
if (*table & _REGION_ENTRY_INV) if (*table & _REGION_ENTRY_INVALID)
goto out; goto out;
table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
table = table + (((to + off) >> 31) & 0x7ff); table = table + (((to + off) >> 31) & 0x7ff);
if (*table & _REGION_ENTRY_INV) if (*table & _REGION_ENTRY_INVALID)
goto out; goto out;
table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
table = table + (((to + off) >> 20) & 0x7ff); table = table + (((to + off) >> 20) & 0x7ff);
/* Clear segment table entry in guest address space. */ /* Clear segment table entry in guest address space. */
flush |= gmap_unlink_segment(gmap, table); flush |= gmap_unlink_segment(gmap, table);
*table = _SEGMENT_ENTRY_INV; *table = _SEGMENT_ENTRY_INVALID;
} }
out: out:
spin_unlock(&gmap->mm->page_table_lock); spin_unlock(&gmap->mm->page_table_lock);
...@@ -345,17 +345,17 @@ int gmap_map_segment(struct gmap *gmap, unsigned long from, ...@@ -345,17 +345,17 @@ int gmap_map_segment(struct gmap *gmap, unsigned long from,
for (off = 0; off < len; off += PMD_SIZE) { for (off = 0; off < len; off += PMD_SIZE) {
/* Walk the gmap address space page table */ /* Walk the gmap address space page table */
table = gmap->table + (((to + off) >> 53) & 0x7ff); table = gmap->table + (((to + off) >> 53) & 0x7ff);
if ((*table & _REGION_ENTRY_INV) && if ((*table & _REGION_ENTRY_INVALID) &&
gmap_alloc_table(gmap, table, _REGION2_ENTRY_EMPTY)) gmap_alloc_table(gmap, table, _REGION2_ENTRY_EMPTY))
goto out_unmap; goto out_unmap;
table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
table = table + (((to + off) >> 42) & 0x7ff); table = table + (((to + off) >> 42) & 0x7ff);
if ((*table & _REGION_ENTRY_INV) && if ((*table & _REGION_ENTRY_INVALID) &&
gmap_alloc_table(gmap, table, _REGION3_ENTRY_EMPTY)) gmap_alloc_table(gmap, table, _REGION3_ENTRY_EMPTY))
goto out_unmap; goto out_unmap;
table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
table = table + (((to + off) >> 31) & 0x7ff); table = table + (((to + off) >> 31) & 0x7ff);
if ((*table & _REGION_ENTRY_INV) && if ((*table & _REGION_ENTRY_INVALID) &&
gmap_alloc_table(gmap, table, _SEGMENT_ENTRY_EMPTY)) gmap_alloc_table(gmap, table, _SEGMENT_ENTRY_EMPTY))
goto out_unmap; goto out_unmap;
table = (unsigned long *) (*table & _REGION_ENTRY_ORIGIN); table = (unsigned long *) (*table & _REGION_ENTRY_ORIGIN);
...@@ -363,7 +363,8 @@ int gmap_map_segment(struct gmap *gmap, unsigned long from, ...@@ -363,7 +363,8 @@ int gmap_map_segment(struct gmap *gmap, unsigned long from,
/* Store 'from' address in an invalid segment table entry. */ /* Store 'from' address in an invalid segment table entry. */
flush |= gmap_unlink_segment(gmap, table); flush |= gmap_unlink_segment(gmap, table);
*table = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | (from + off); *table = (from + off) | (_SEGMENT_ENTRY_INVALID |
_SEGMENT_ENTRY_PROTECT);
} }
spin_unlock(&gmap->mm->page_table_lock); spin_unlock(&gmap->mm->page_table_lock);
up_read(&gmap->mm->mmap_sem); up_read(&gmap->mm->mmap_sem);
...@@ -384,15 +385,15 @@ static unsigned long *gmap_table_walk(unsigned long address, struct gmap *gmap) ...@@ -384,15 +385,15 @@ static unsigned long *gmap_table_walk(unsigned long address, struct gmap *gmap)
unsigned long *table; unsigned long *table;
table = gmap->table + ((address >> 53) & 0x7ff); table = gmap->table + ((address >> 53) & 0x7ff);
if (unlikely(*table & _REGION_ENTRY_INV)) if (unlikely(*table & _REGION_ENTRY_INVALID))
return ERR_PTR(-EFAULT); return ERR_PTR(-EFAULT);
table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
table = table + ((address >> 42) & 0x7ff); table = table + ((address >> 42) & 0x7ff);
if (unlikely(*table & _REGION_ENTRY_INV)) if (unlikely(*table & _REGION_ENTRY_INVALID))
return ERR_PTR(-EFAULT); return ERR_PTR(-EFAULT);
table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
table = table + ((address >> 31) & 0x7ff); table = table + ((address >> 31) & 0x7ff);
if (unlikely(*table & _REGION_ENTRY_INV)) if (unlikely(*table & _REGION_ENTRY_INVALID))
return ERR_PTR(-EFAULT); return ERR_PTR(-EFAULT);
table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
table = table + ((address >> 20) & 0x7ff); table = table + ((address >> 20) & 0x7ff);
@@ -422,11 +423,11 @@ unsigned long __gmap_translate(unsigned long address, struct gmap *gmap)
 		return PTR_ERR(segment_ptr);
 	/* Convert the gmap address to an mm address. */
 	segment = *segment_ptr;
-	if (!(segment & _SEGMENT_ENTRY_INV)) {
+	if (!(segment & _SEGMENT_ENTRY_INVALID)) {
 		page = pfn_to_page(segment >> PAGE_SHIFT);
 		mp = (struct gmap_pgtable *) page->index;
 		return mp->vmaddr | (address & ~PMD_MASK);
-	} else if (segment & _SEGMENT_ENTRY_RO) {
+	} else if (segment & _SEGMENT_ENTRY_PROTECT) {
 		vmaddr = segment & _SEGMENT_ENTRY_ORIGIN;
 		return vmaddr | (address & ~PMD_MASK);
 	}
@@ -517,8 +518,8 @@ static void gmap_disconnect_pgtable(struct mm_struct *mm, unsigned long *table)
 	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
 	mp = (struct gmap_pgtable *) page->index;
 	list_for_each_entry_safe(rmap, next, &mp->mapper, list) {
-		*rmap->entry =
-			_SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | mp->vmaddr;
+		*rmap->entry = mp->vmaddr | (_SEGMENT_ENTRY_INVALID |
+					     _SEGMENT_ENTRY_PROTECT);
 		list_del(&rmap->list);
 		kfree(rmap);
 		flush = 1;
@@ -545,13 +546,13 @@ unsigned long __gmap_fault(unsigned long address, struct gmap *gmap)
 	/* Convert the gmap address to an mm address. */
 	while (1) {
 		segment = *segment_ptr;
-		if (!(segment & _SEGMENT_ENTRY_INV)) {
+		if (!(segment & _SEGMENT_ENTRY_INVALID)) {
 			/* Page table is present */
 			page = pfn_to_page(segment >> PAGE_SHIFT);
 			mp = (struct gmap_pgtable *) page->index;
 			return mp->vmaddr | (address & ~PMD_MASK);
 		}
-		if (!(segment & _SEGMENT_ENTRY_RO))
+		if (!(segment & _SEGMENT_ENTRY_PROTECT))
 			/* Nothing mapped in the gmap address space. */
 			break;
 		rc = gmap_connect_pgtable(address, segment, segment_ptr, gmap);
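__gmap_fault, like __gmap_translate above, now reads the segment entry as a three-state value: valid means a page table is attached, invalid plus protect means a parked host address, and invalid alone means nothing is mapped. A hedged sketch of that classification, again with stand-in bit values:

/* Stand-in bits; the real _SEGMENT_ENTRY_* values are in
 * arch/s390/include/asm/pgtable.h. */
#define SEG_INVALID	0x20UL
#define SEG_PROTECT	0x200UL

enum seg_state {
	SEG_PRESENT,	/* valid: a page table is connected */
	SEG_PARKED,	/* invalid + protect: holds a stashed host address */
	SEG_EMPTY,	/* invalid only: nothing mapped */
};

static enum seg_state classify(unsigned long entry)
{
	if (!(entry & SEG_INVALID))
		return SEG_PRESENT;
	if (entry & SEG_PROTECT)
		return SEG_PARKED;
	return SEG_EMPTY;
}

Only the parked case loops back through gmap_connect_pgtable; the empty case breaks out as a genuine fault.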
@@ -586,25 +587,25 @@ void gmap_discard(unsigned long from, unsigned long to, struct gmap *gmap)
 	while (address < to) {
 		/* Walk the gmap address space page table */
 		table = gmap->table + ((address >> 53) & 0x7ff);
-		if (unlikely(*table & _REGION_ENTRY_INV)) {
+		if (unlikely(*table & _REGION_ENTRY_INVALID)) {
 			address = (address + PMD_SIZE) & PMD_MASK;
 			continue;
 		}
 		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
 		table = table + ((address >> 42) & 0x7ff);
-		if (unlikely(*table & _REGION_ENTRY_INV)) {
+		if (unlikely(*table & _REGION_ENTRY_INVALID)) {
 			address = (address + PMD_SIZE) & PMD_MASK;
 			continue;
 		}
 		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
 		table = table + ((address >> 31) & 0x7ff);
-		if (unlikely(*table & _REGION_ENTRY_INV)) {
+		if (unlikely(*table & _REGION_ENTRY_INVALID)) {
 			address = (address + PMD_SIZE) & PMD_MASK;
 			continue;
 		}
 		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
 		table = table + ((address >> 20) & 0x7ff);
-		if (unlikely(*table & _SEGMENT_ENTRY_INV)) {
+		if (unlikely(*table & _SEGMENT_ENTRY_INVALID)) {
 			address = (address + PMD_SIZE) & PMD_MASK;
 			continue;
 		}
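On every invalid entry, gmap_discard jumps to the next segment boundary with (address + PMD_SIZE) & PMD_MASK. The rounding is ordinary power-of-two mask arithmetic, as this tiny self-contained check illustrates (1 MB stands in for PMD_SIZE on s390):

#include <assert.h>

#define SEG_SIZE	(1UL << 20)		/* PMD_SIZE: 1 MB segment */
#define SEG_MASK	(~(SEG_SIZE - 1))	/* PMD_MASK */

/* Add one segment, then clear the offset bits: lands exactly on the
 * next 1 MB boundary regardless of where inside the segment we were. */
static unsigned long next_segment(unsigned long address)
{
	return (address + SEG_SIZE) & SEG_MASK;
}

int main(void)
{
	assert(next_segment(0x00100000UL) == 0x00200000UL);
	assert(next_segment(0x00100001UL) == 0x00200000UL);
	assert(next_segment(0x001fffffUL) == 0x00200000UL);
	return 0;
}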
@@ -687,7 +688,7 @@ int gmap_ipte_notify(struct gmap *gmap, unsigned long start, unsigned long len)
 			continue;
 		/* Set notification bit in the pgste of the pte */
 		entry = *ptep;
-		if ((pte_val(entry) & (_PAGE_INVALID | _PAGE_RO)) == 0) {
+		if ((pte_val(entry) & (_PAGE_INVALID | _PAGE_PROTECT)) == 0) {
 			pgste = pgste_get_lock(ptep);
 			pgste_val(pgste) |= PGSTE_IN_BIT;
 			pgste_set_unlock(ptep, pgste);
@@ -752,7 +753,7 @@ static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm,
 	page->index = (unsigned long) mp;
 	atomic_set(&page->_mapcount, 3);
 	table = (unsigned long *) page_to_phys(page);
-	clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE/2);
+	clear_table(table, _PAGE_INVALID, PAGE_SIZE/2);
 	clear_table(table + PTRS_PER_PTE, 0, PAGE_SIZE/2);
 	return table;
 }
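After the cleanup, both halves of a pgste page are initialised the same way: the low half becomes 256 ptes marked _PAGE_INVALID, the high half 256 zeroed pgstes. clear_table itself is an s390 arch primitive; a portable stand-in with the same effect might look like this (the pte bit value is illustrative):

#define PAGE_SIZE	4096UL
#define PTRS_PER_PTE	256
#define PAGE_INVALID	0x400UL		/* illustrative pte bit */

/* Portable stand-in for the s390 clear_table() primitive: fill a
 * table with a repeated 8-byte pattern. */
static void clear_table(unsigned long *table, unsigned long val,
			unsigned long size)
{
	unsigned long i;

	for (i = 0; i < size / sizeof(unsigned long); i++)
		table[i] = val;
}

int main(void)
{
	static unsigned long page[PAGE_SIZE / sizeof(unsigned long)];

	clear_table(page, PAGE_INVALID, PAGE_SIZE / 2);		/* pte half */
	clear_table(page + PTRS_PER_PTE, 0, PAGE_SIZE / 2);	/* pgste half */
	return 0;
}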
@@ -878,7 +879,7 @@ unsigned long *page_table_alloc(struct mm_struct *mm, unsigned long vmaddr)
 		pgtable_page_ctor(page);
 		atomic_set(&page->_mapcount, 1);
 		table = (unsigned long *) page_to_phys(page);
-		clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE);
+		clear_table(table, _PAGE_INVALID, PAGE_SIZE);
 		spin_lock_bh(&mm->context.list_lock);
 		list_add(&page->lru, &mm->context.pgtable_list);
 	} else {
@@ -1198,9 +1199,9 @@ pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
 		list_del(lh);
 	}
 	ptep = (pte_t *) pgtable;
-	pte_val(*ptep) = _PAGE_TYPE_EMPTY;
+	pte_val(*ptep) = _PAGE_INVALID;
 	ptep++;
-	pte_val(*ptep) = _PAGE_TYPE_EMPTY;
+	pte_val(*ptep) = _PAGE_INVALID;
 	return pgtable;
 }
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
@@ -69,7 +69,7 @@ static pte_t __ref *vmem_pte_alloc(unsigned long address)
 		pte = alloc_bootmem(PTRS_PER_PTE * sizeof(pte_t));
 	if (!pte)
 		return NULL;
-	clear_table((unsigned long *) pte, _PAGE_TYPE_EMPTY,
+	clear_table((unsigned long *) pte, _PAGE_INVALID,
 		    PTRS_PER_PTE * sizeof(pte_t));
 	return pte;
 }
@@ -101,7 +101,7 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
 		    !(address & ~PUD_MASK) && (address + PUD_SIZE <= end)) {
 			pud_val(*pu_dir) = __pa(address) |
 				_REGION_ENTRY_TYPE_R3 | _REGION3_ENTRY_LARGE |
-				(ro ? _REGION_ENTRY_RO : 0);
+				(ro ? _REGION_ENTRY_PROTECT : 0);
 			address += PUD_SIZE;
 			continue;
 		}
@@ -118,7 +118,7 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
 		    !(address & ~PMD_MASK) && (address + PMD_SIZE <= end)) {
 			pmd_val(*pm_dir) = __pa(address) |
 				_SEGMENT_ENTRY | _SEGMENT_ENTRY_LARGE |
-				(ro ? _SEGMENT_ENTRY_RO : 0);
+				(ro ? _SEGMENT_ENTRY_PROTECT : 0);
 			address += PMD_SIZE;
 			continue;
 		}
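Both large-mapping branches in vmem_add_mem gate on the same two conditions: the address must be naturally aligned to the block size, and a whole block must still fit below end. A stand-alone version of that test (block sizes are powers of two, as in the kernel):

#include <assert.h>

/* Mirrors the !(address & ~PMD_MASK) && (address + PMD_SIZE <= end)
 * checks above, generalised over the block size. */
static int can_map_block(unsigned long addr, unsigned long end,
			 unsigned long size)
{
	unsigned long mask = ~(size - 1);

	return !(addr & ~mask) && addr + size <= end;
}

int main(void)
{
	unsigned long seg = 1UL << 20;	/* 1 MB, PMD_SIZE on s390 */

	assert(can_map_block(0x00200000UL, 0x00400000UL, seg));	/* fits */
	assert(!can_map_block(0x00200800UL, 0x00400000UL, seg));	/* misaligned */
	assert(!can_map_block(0x00300000UL, 0x00380000UL, seg));	/* too short */
	return 0;
}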
@@ -131,7 +131,8 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
 		}
 		pt_dir = pte_offset_kernel(pm_dir, address);
-		pte_val(*pt_dir) = __pa(address) | (ro ? _PAGE_RO : 0);
+		pte_val(*pt_dir) = __pa(address) |
+			pgprot_val(ro ? PAGE_KERNEL_RO : PAGE_KERNEL);
 		address += PAGE_SIZE;
 	}
 	ret = 0;
@@ -154,7 +155,7 @@ static void vmem_remove_range(unsigned long start, unsigned long size)
 	pte_t *pt_dir;
 	pte_t pte;
-	pte_val(pte) = _PAGE_TYPE_EMPTY;
+	pte_val(pte) = _PAGE_INVALID;
 	while (address < end) {
 		pg_dir = pgd_offset_k(address);
 		if (pgd_none(*pg_dir)) {
@@ -255,7 +256,8 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
 			new_page =__pa(vmem_alloc_pages(0));
 			if (!new_page)
 				goto out;
-			pte_val(*pt_dir) = __pa(new_page);
+			pte_val(*pt_dir) =
+				__pa(new_page) | pgprot_val(PAGE_KERNEL);
 		}
 		address += PAGE_SIZE;
 	}
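The vmemmap fix composes the pte from the page-aligned physical address plus the protection bits, rather than installing the bare address. A toy sketch of that composition, with a made-up protection value standing in for pgprot_val(PAGE_KERNEL):

#include <assert.h>

#define TOY_PAGE_SHIFT	12
#define TOY_PROT_KERNEL	0x003UL	/* made-up; really pgprot_val(PAGE_KERNEL) */

/* Page-aligned physical address in the high bits, protection flags in
 * the low bits. */
static unsigned long mk_pte_phys(unsigned long phys, unsigned long prot)
{
	return (phys & ~((1UL << TOY_PAGE_SHIFT) - 1)) | prot;
}

int main(void)
{
	assert(mk_pte_phys(0x12345000UL, TOY_PROT_KERNEL) == 0x12345003UL);
	return 0;
}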