Commit e5098611 authored by Martin Schwidefsky

s390/mm: cleanup page table definitions

Improve the encoding of the different pte types and the naming of the
page, segment table and region table bits. Due to the different pte
encoding the hugetlbfs primitives need to be adapted as well. To improve
compatibility with common code, make the huge ptes use the encoding of
normal ptes. The conversion between the pte and pmd encoding for a huge
pte is done with set_huge_pte_at and huge_ptep_get.
Overall the code is now easier to understand.
Reviewed-by: Gerald Schaefer <gerald.schaefer@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
parent 416fd0ff
...@@ -17,6 +17,9 @@ ...@@ -17,6 +17,9 @@
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pte); pte_t *ptep, pte_t pte);
pte_t huge_ptep_get(pte_t *ptep);
pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
unsigned long addr, pte_t *ptep);
/* /*
* If the arch doesn't supply something else, assume that hugepage * If the arch doesn't supply something else, assume that hugepage
...@@ -38,147 +41,75 @@ static inline int prepare_hugepage_range(struct file *file, ...@@ -38,147 +41,75 @@ static inline int prepare_hugepage_range(struct file *file,
int arch_prepare_hugepage(struct page *page); int arch_prepare_hugepage(struct page *page);
void arch_release_hugepage(struct page *page); void arch_release_hugepage(struct page *page);
static inline pte_t huge_pte_wrprotect(pte_t pte) static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
pte_t *ptep)
{ {
pte_val(pte) |= _PAGE_RO; pte_val(*ptep) = _SEGMENT_ENTRY_EMPTY;
return pte;
} }
static inline int huge_pte_none(pte_t pte) static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
unsigned long address, pte_t *ptep)
{ {
return (pte_val(pte) & _SEGMENT_ENTRY_INV) && huge_ptep_get_and_clear(vma->vm_mm, address, ptep);
!(pte_val(pte) & _SEGMENT_ENTRY_RO);
} }
static inline pte_t huge_ptep_get(pte_t *ptep) static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep,
pte_t pte, int dirty)
{ {
pte_t pte = *ptep; int changed = !pte_same(huge_ptep_get(ptep), pte);
unsigned long mask; if (changed) {
huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
if (!MACHINE_HAS_HPAGE) { set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
ptep = (pte_t *) (pte_val(pte) & _SEGMENT_ENTRY_ORIGIN);
if (ptep) {
mask = pte_val(pte) &
(_SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO);
pte = pte_mkhuge(*ptep);
pte_val(pte) |= mask;
}
} }
return pte; return changed;
} }
static inline void __pmd_csp(pmd_t *pmdp) static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
{ {
register unsigned long reg2 asm("2") = pmd_val(*pmdp); pte_t pte = huge_ptep_get_and_clear(mm, addr, ptep);
register unsigned long reg3 asm("3") = pmd_val(*pmdp) | set_huge_pte_at(mm, addr, ptep, pte_wrprotect(pte));
_SEGMENT_ENTRY_INV;
register unsigned long reg4 asm("4") = ((unsigned long) pmdp) + 5;
asm volatile(
" csp %1,%3"
: "=m" (*pmdp)
: "d" (reg2), "d" (reg3), "d" (reg4), "m" (*pmdp) : "cc");
} }
static inline void huge_ptep_invalidate(struct mm_struct *mm, static inline pte_t mk_huge_pte(struct page *page, pgprot_t pgprot)
unsigned long address, pte_t *ptep)
{
pmd_t *pmdp = (pmd_t *) ptep;
if (MACHINE_HAS_IDTE)
__pmd_idte(address, pmdp);
else
__pmd_csp(pmdp);
pmd_val(*pmdp) = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY;
}
static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
{
pte_t pte = huge_ptep_get(ptep);
huge_ptep_invalidate(mm, addr, ptep);
return pte;
}
#define huge_ptep_set_access_flags(__vma, __addr, __ptep, __entry, __dirty) \
({ \
int __changed = !pte_same(huge_ptep_get(__ptep), __entry); \
if (__changed) { \
huge_ptep_invalidate((__vma)->vm_mm, __addr, __ptep); \
set_huge_pte_at((__vma)->vm_mm, __addr, __ptep, __entry); \
} \
__changed; \
})
#define huge_ptep_set_wrprotect(__mm, __addr, __ptep) \
({ \
pte_t __pte = huge_ptep_get(__ptep); \
if (huge_pte_write(__pte)) { \
huge_ptep_invalidate(__mm, __addr, __ptep); \
set_huge_pte_at(__mm, __addr, __ptep, \
huge_pte_wrprotect(__pte)); \
} \
})
static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
unsigned long address, pte_t *ptep)
{ {
huge_ptep_invalidate(vma->vm_mm, address, ptep); return mk_pte(page, pgprot);
} }
static inline pte_t mk_huge_pte(struct page *page, pgprot_t pgprot) static inline int huge_pte_none(pte_t pte)
{ {
pte_t pte; return pte_none(pte);
pmd_t pmd;
pmd = mk_pmd_phys(page_to_phys(page), pgprot);
pte_val(pte) = pmd_val(pmd);
return pte;
} }
static inline int huge_pte_write(pte_t pte) static inline int huge_pte_write(pte_t pte)
{ {
pmd_t pmd; return pte_write(pte);
pmd_val(pmd) = pte_val(pte);
return pmd_write(pmd);
} }
static inline int huge_pte_dirty(pte_t pte) static inline int huge_pte_dirty(pte_t pte)
{ {
/* No dirty bit in the segment table entry. */ return pte_dirty(pte);
return 0;
} }
static inline pte_t huge_pte_mkwrite(pte_t pte) static inline pte_t huge_pte_mkwrite(pte_t pte)
{ {
pmd_t pmd; return pte_mkwrite(pte);
pmd_val(pmd) = pte_val(pte);
pte_val(pte) = pmd_val(pmd_mkwrite(pmd));
return pte;
} }
static inline pte_t huge_pte_mkdirty(pte_t pte) static inline pte_t huge_pte_mkdirty(pte_t pte)
{ {
/* No dirty bit in the segment table entry. */ return pte_mkdirty(pte);
return pte;
} }
static inline pte_t huge_pte_modify(pte_t pte, pgprot_t newprot) static inline pte_t huge_pte_wrprotect(pte_t pte)
{ {
pmd_t pmd; return pte_wrprotect(pte);
pmd_val(pmd) = pte_val(pte);
pte_val(pte) = pmd_val(pmd_modify(pmd, newprot));
return pte;
} }
static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr, static inline pte_t huge_pte_modify(pte_t pte, pgprot_t newprot)
pte_t *ptep)
{ {
pmd_clear((pmd_t *) ptep); return pte_modify(pte, newprot);
} }
#endif /* _ASM_S390_HUGETLB_H */ #endif /* _ASM_S390_HUGETLB_H */
This diff is collapsed.
...@@ -113,11 +113,11 @@ int vdso_alloc_per_cpu(struct _lowcore *lowcore) ...@@ -113,11 +113,11 @@ int vdso_alloc_per_cpu(struct _lowcore *lowcore)
clear_table((unsigned long *) segment_table, _SEGMENT_ENTRY_EMPTY, clear_table((unsigned long *) segment_table, _SEGMENT_ENTRY_EMPTY,
PAGE_SIZE << SEGMENT_ORDER); PAGE_SIZE << SEGMENT_ORDER);
clear_table((unsigned long *) page_table, _PAGE_TYPE_EMPTY, clear_table((unsigned long *) page_table, _PAGE_INVALID,
256*sizeof(unsigned long)); 256*sizeof(unsigned long));
*(unsigned long *) segment_table = _SEGMENT_ENTRY + page_table; *(unsigned long *) segment_table = _SEGMENT_ENTRY + page_table;
*(unsigned long *) page_table = _PAGE_RO + page_frame; *(unsigned long *) page_table = _PAGE_PROTECT + page_frame;
psal = (u32 *) (page_table + 256*sizeof(unsigned long)); psal = (u32 *) (page_table + 256*sizeof(unsigned long));
aste = psal + 32; aste = psal + 32;
......
...@@ -86,28 +86,28 @@ static unsigned long follow_table(struct mm_struct *mm, ...@@ -86,28 +86,28 @@ static unsigned long follow_table(struct mm_struct *mm,
switch (mm->context.asce_bits & _ASCE_TYPE_MASK) { switch (mm->context.asce_bits & _ASCE_TYPE_MASK) {
case _ASCE_TYPE_REGION1: case _ASCE_TYPE_REGION1:
table = table + ((address >> 53) & 0x7ff); table = table + ((address >> 53) & 0x7ff);
if (unlikely(*table & _REGION_ENTRY_INV)) if (unlikely(*table & _REGION_ENTRY_INVALID))
return -0x39UL; return -0x39UL;
table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
/* fallthrough */ /* fallthrough */
case _ASCE_TYPE_REGION2: case _ASCE_TYPE_REGION2:
table = table + ((address >> 42) & 0x7ff); table = table + ((address >> 42) & 0x7ff);
if (unlikely(*table & _REGION_ENTRY_INV)) if (unlikely(*table & _REGION_ENTRY_INVALID))
return -0x3aUL; return -0x3aUL;
table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
/* fallthrough */ /* fallthrough */
case _ASCE_TYPE_REGION3: case _ASCE_TYPE_REGION3:
table = table + ((address >> 31) & 0x7ff); table = table + ((address >> 31) & 0x7ff);
if (unlikely(*table & _REGION_ENTRY_INV)) if (unlikely(*table & _REGION_ENTRY_INVALID))
return -0x3bUL; return -0x3bUL;
table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
/* fallthrough */ /* fallthrough */
case _ASCE_TYPE_SEGMENT: case _ASCE_TYPE_SEGMENT:
table = table + ((address >> 20) & 0x7ff); table = table + ((address >> 20) & 0x7ff);
if (unlikely(*table & _SEGMENT_ENTRY_INV)) if (unlikely(*table & _SEGMENT_ENTRY_INVALID))
return -0x10UL; return -0x10UL;
if (unlikely(*table & _SEGMENT_ENTRY_LARGE)) { if (unlikely(*table & _SEGMENT_ENTRY_LARGE)) {
if (write && (*table & _SEGMENT_ENTRY_RO)) if (write && (*table & _SEGMENT_ENTRY_PROTECT))
return -0x04UL; return -0x04UL;
return (*table & _SEGMENT_ENTRY_ORIGIN_LARGE) + return (*table & _SEGMENT_ENTRY_ORIGIN_LARGE) +
(address & ~_SEGMENT_ENTRY_ORIGIN_LARGE); (address & ~_SEGMENT_ENTRY_ORIGIN_LARGE);
...@@ -117,7 +117,7 @@ static unsigned long follow_table(struct mm_struct *mm, ...@@ -117,7 +117,7 @@ static unsigned long follow_table(struct mm_struct *mm,
table = table + ((address >> 12) & 0xff); table = table + ((address >> 12) & 0xff);
if (unlikely(*table & _PAGE_INVALID)) if (unlikely(*table & _PAGE_INVALID))
return -0x11UL; return -0x11UL;
if (write && (*table & _PAGE_RO)) if (write && (*table & _PAGE_PROTECT))
return -0x04UL; return -0x04UL;
return (*table & PAGE_MASK) + (address & ~PAGE_MASK); return (*table & PAGE_MASK) + (address & ~PAGE_MASK);
} }
...@@ -130,13 +130,13 @@ static unsigned long follow_table(struct mm_struct *mm, ...@@ -130,13 +130,13 @@ static unsigned long follow_table(struct mm_struct *mm,
unsigned long *table = (unsigned long *)__pa(mm->pgd); unsigned long *table = (unsigned long *)__pa(mm->pgd);
table = table + ((address >> 20) & 0x7ff); table = table + ((address >> 20) & 0x7ff);
if (unlikely(*table & _SEGMENT_ENTRY_INV)) if (unlikely(*table & _SEGMENT_ENTRY_INVALID))
return -0x10UL; return -0x10UL;
table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN); table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
table = table + ((address >> 12) & 0xff); table = table + ((address >> 12) & 0xff);
if (unlikely(*table & _PAGE_INVALID)) if (unlikely(*table & _PAGE_INVALID))
return -0x11UL; return -0x11UL;
if (write && (*table & _PAGE_RO)) if (write && (*table & _PAGE_PROTECT))
return -0x04UL; return -0x04UL;
return (*table & PAGE_MASK) + (address & ~PAGE_MASK); return (*table & PAGE_MASK) + (address & ~PAGE_MASK);
} }
......
...@@ -53,7 +53,7 @@ static void print_prot(struct seq_file *m, unsigned int pr, int level) ...@@ -53,7 +53,7 @@ static void print_prot(struct seq_file *m, unsigned int pr, int level)
seq_printf(m, "I\n"); seq_printf(m, "I\n");
return; return;
} }
seq_printf(m, "%s", pr & _PAGE_RO ? "RO " : "RW "); seq_printf(m, "%s", pr & _PAGE_PROTECT ? "RO " : "RW ");
seq_printf(m, "%s", pr & _PAGE_CO ? "CO " : " "); seq_printf(m, "%s", pr & _PAGE_CO ? "CO " : " ");
seq_putc(m, '\n'); seq_putc(m, '\n');
} }
...@@ -105,12 +105,12 @@ static void note_page(struct seq_file *m, struct pg_state *st, ...@@ -105,12 +105,12 @@ static void note_page(struct seq_file *m, struct pg_state *st,
} }
/* /*
* The actual page table walker functions. In order to keep the implementation * The actual page table walker functions. In order to keep the
* of print_prot() short, we only check and pass _PAGE_INVALID and _PAGE_RO * implementation of print_prot() short, we only check and pass
* flags to note_page() if a region, segment or page table entry is invalid or * _PAGE_INVALID and _PAGE_PROTECT flags to note_page() if a region,
* read-only. * segment or page table entry is invalid or read-only.
* After all it's just a hint that the current level being walked contains an * After all it's just a hint that the current level being walked
* invalid or read-only entry. * contains an invalid or read-only entry.
*/ */
static void walk_pte_level(struct seq_file *m, struct pg_state *st, static void walk_pte_level(struct seq_file *m, struct pg_state *st,
pmd_t *pmd, unsigned long addr) pmd_t *pmd, unsigned long addr)
...@@ -122,14 +122,14 @@ static void walk_pte_level(struct seq_file *m, struct pg_state *st, ...@@ -122,14 +122,14 @@ static void walk_pte_level(struct seq_file *m, struct pg_state *st,
for (i = 0; i < PTRS_PER_PTE && addr < max_addr; i++) { for (i = 0; i < PTRS_PER_PTE && addr < max_addr; i++) {
st->current_address = addr; st->current_address = addr;
pte = pte_offset_kernel(pmd, addr); pte = pte_offset_kernel(pmd, addr);
prot = pte_val(*pte) & (_PAGE_RO | _PAGE_INVALID); prot = pte_val(*pte) & (_PAGE_PROTECT | _PAGE_INVALID);
note_page(m, st, prot, 4); note_page(m, st, prot, 4);
addr += PAGE_SIZE; addr += PAGE_SIZE;
} }
} }
#ifdef CONFIG_64BIT #ifdef CONFIG_64BIT
#define _PMD_PROT_MASK (_SEGMENT_ENTRY_RO | _SEGMENT_ENTRY_CO) #define _PMD_PROT_MASK (_SEGMENT_ENTRY_PROTECT | _SEGMENT_ENTRY_CO)
#else #else
#define _PMD_PROT_MASK 0 #define _PMD_PROT_MASK 0
#endif #endif
......
...@@ -24,7 +24,7 @@ static inline int gup_pte_range(pmd_t *pmdp, pmd_t pmd, unsigned long addr, ...@@ -24,7 +24,7 @@ static inline int gup_pte_range(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
pte_t *ptep, pte; pte_t *ptep, pte;
struct page *page; struct page *page;
mask = (write ? _PAGE_RO : 0) | _PAGE_INVALID | _PAGE_SPECIAL; mask = (write ? _PAGE_PROTECT : 0) | _PAGE_INVALID | _PAGE_SPECIAL;
ptep = ((pte_t *) pmd_deref(pmd)) + pte_index(addr); ptep = ((pte_t *) pmd_deref(pmd)) + pte_index(addr);
do { do {
...@@ -55,8 +55,8 @@ static inline int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr, ...@@ -55,8 +55,8 @@ static inline int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
struct page *head, *page, *tail; struct page *head, *page, *tail;
int refs; int refs;
result = write ? 0 : _SEGMENT_ENTRY_RO; result = write ? 0 : _SEGMENT_ENTRY_PROTECT;
mask = result | _SEGMENT_ENTRY_INV; mask = result | _SEGMENT_ENTRY_INVALID;
if ((pmd_val(pmd) & mask) != result) if ((pmd_val(pmd) & mask) != result)
return 0; return 0;
VM_BUG_ON(!pfn_valid(pmd_val(pmd) >> PAGE_SHIFT)); VM_BUG_ON(!pfn_valid(pmd_val(pmd) >> PAGE_SHIFT));
......
...@@ -8,21 +8,107 @@ ...@@ -8,21 +8,107 @@
#include <linux/mm.h> #include <linux/mm.h>
#include <linux/hugetlb.h> #include <linux/hugetlb.h>
static inline pmd_t __pte_to_pmd(pte_t pte)
{
int none, prot;
pmd_t pmd;
/*
* Convert encoding pte bits pmd bits
* .IR.....wdtp ..R...I.....
* empty .10.....0000 -> ..0...1.....
* prot-none, clean .11.....0001 -> ..1...1.....
* prot-none, dirty .10.....0101 -> ..1...1.....
* read-only, clean .01.....0001 -> ..1...0.....
* read-only, dirty .01.....0101 -> ..1...0.....
* read-write, clean .01.....1001 -> ..0...0.....
* read-write, dirty .00.....1101 -> ..0...0.....
* Huge ptes are dirty by definition, a clean pte is made dirty
* by the conversion.
*/
if (pte_present(pte)) {
pmd_val(pmd) = pte_val(pte) & PAGE_MASK;
if (pte_val(pte) & _PAGE_INVALID)
pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
none = (pte_val(pte) & _PAGE_PRESENT) &&
(pte_val(pte) & _PAGE_INVALID);
prot = (pte_val(pte) & _PAGE_PROTECT);
if (prot || none)
pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
} else
pmd_val(pmd) = _SEGMENT_ENTRY_INVALID;
return pmd;
}
static inline pte_t __pmd_to_pte(pmd_t pmd)
{
pte_t pte;
/*
* Convert encoding pmd bits pte bits
* ..R...I..... .IR.....wdtp
* empty ..0...1..... -> .10.....0000
* prot-none, young ..1...1..... -> .10.....0101
* read-only, young ..1...0..... -> .01.....0101
* read-write, young ..0...0..... -> .00.....1101
* Huge ptes are dirty by definition
*/
if (pmd_present(pmd)) {
pte_val(pte) = _PAGE_PRESENT | _PAGE_LARGE | _PAGE_DIRTY |
(pmd_val(pmd) & PAGE_MASK);
if (pmd_val(pmd) & _SEGMENT_ENTRY_INVALID)
pte_val(pte) |= _PAGE_INVALID;
else {
if (pmd_val(pmd) & _SEGMENT_ENTRY_PROTECT)
pte_val(pte) |= _PAGE_PROTECT;
else
pte_val(pte) |= _PAGE_WRITE;
}
} else
pte_val(pte) = _PAGE_INVALID;
return pte;
}
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *pteptr, pte_t pteval) pte_t *ptep, pte_t pte)
{ {
pmd_t *pmdp = (pmd_t *) pteptr; pmd_t pmd;
unsigned long mask;
pmd = __pte_to_pmd(pte);
if (!MACHINE_HAS_HPAGE) { if (!MACHINE_HAS_HPAGE) {
pteptr = (pte_t *) pte_page(pteval)[1].index; pmd_val(pmd) &= ~_SEGMENT_ENTRY_ORIGIN;
mask = pte_val(pteval) & pmd_val(pmd) |= pte_page(pte)[1].index;
(_SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO); } else
pte_val(pteval) = (_SEGMENT_ENTRY + __pa(pteptr)) | mask; pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_CO;
*(pmd_t *) ptep = pmd;
}
pte_t huge_ptep_get(pte_t *ptep)
{
unsigned long origin;
pmd_t pmd;
pmd = *(pmd_t *) ptep;
if (!MACHINE_HAS_HPAGE && pmd_present(pmd)) {
origin = pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN;
pmd_val(pmd) &= ~_SEGMENT_ENTRY_ORIGIN;
pmd_val(pmd) |= *(unsigned long *) origin;
} }
return __pmd_to_pte(pmd);
}
pmd_val(*pmdp) = pte_val(pteval); pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
{
pmd_t *pmdp = (pmd_t *) ptep;
pte_t pte = huge_ptep_get(ptep);
if (MACHINE_HAS_IDTE)
__pmd_idte(addr, pmdp);
else
__pmd_csp(pmdp);
pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
return pte;
} }
int arch_prepare_hugepage(struct page *page) int arch_prepare_hugepage(struct page *page)
...@@ -58,7 +144,7 @@ void arch_release_hugepage(struct page *page) ...@@ -58,7 +144,7 @@ void arch_release_hugepage(struct page *page)
ptep = (pte_t *) page[1].index; ptep = (pte_t *) page[1].index;
if (!ptep) if (!ptep)
return; return;
clear_table((unsigned long *) ptep, _PAGE_TYPE_EMPTY, clear_table((unsigned long *) ptep, _PAGE_INVALID,
PTRS_PER_PTE * sizeof(pte_t)); PTRS_PER_PTE * sizeof(pte_t));
page_table_free(&init_mm, (unsigned long *) ptep); page_table_free(&init_mm, (unsigned long *) ptep);
page[1].index = 0; page[1].index = 0;
......
...@@ -118,7 +118,7 @@ void kernel_map_pages(struct page *page, int numpages, int enable) ...@@ -118,7 +118,7 @@ void kernel_map_pages(struct page *page, int numpages, int enable)
pte = pte_offset_kernel(pmd, address); pte = pte_offset_kernel(pmd, address);
if (!enable) { if (!enable) {
__ptep_ipte(address, pte); __ptep_ipte(address, pte);
pte_val(*pte) = _PAGE_TYPE_EMPTY; pte_val(*pte) = _PAGE_INVALID;
continue; continue;
} }
pte_val(*pte) = __pa(address); pte_val(*pte) = __pa(address);
......
...@@ -161,7 +161,7 @@ static int gmap_unlink_segment(struct gmap *gmap, unsigned long *table) ...@@ -161,7 +161,7 @@ static int gmap_unlink_segment(struct gmap *gmap, unsigned long *table)
struct gmap_rmap *rmap; struct gmap_rmap *rmap;
struct page *page; struct page *page;
if (*table & _SEGMENT_ENTRY_INV) if (*table & _SEGMENT_ENTRY_INVALID)
return 0; return 0;
page = pfn_to_page(*table >> PAGE_SHIFT); page = pfn_to_page(*table >> PAGE_SHIFT);
mp = (struct gmap_pgtable *) page->index; mp = (struct gmap_pgtable *) page->index;
...@@ -172,7 +172,7 @@ static int gmap_unlink_segment(struct gmap *gmap, unsigned long *table) ...@@ -172,7 +172,7 @@ static int gmap_unlink_segment(struct gmap *gmap, unsigned long *table)
kfree(rmap); kfree(rmap);
break; break;
} }
*table = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | mp->vmaddr; *table = mp->vmaddr | _SEGMENT_ENTRY_INVALID | _SEGMENT_ENTRY_PROTECT;
return 1; return 1;
} }
...@@ -258,7 +258,7 @@ static int gmap_alloc_table(struct gmap *gmap, ...@@ -258,7 +258,7 @@ static int gmap_alloc_table(struct gmap *gmap,
return -ENOMEM; return -ENOMEM;
new = (unsigned long *) page_to_phys(page); new = (unsigned long *) page_to_phys(page);
crst_table_init(new, init); crst_table_init(new, init);
if (*table & _REGION_ENTRY_INV) { if (*table & _REGION_ENTRY_INVALID) {
list_add(&page->lru, &gmap->crst_list); list_add(&page->lru, &gmap->crst_list);
*table = (unsigned long) new | _REGION_ENTRY_LENGTH | *table = (unsigned long) new | _REGION_ENTRY_LENGTH |
(*table & _REGION_ENTRY_TYPE_MASK); (*table & _REGION_ENTRY_TYPE_MASK);
...@@ -292,22 +292,22 @@ int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len) ...@@ -292,22 +292,22 @@ int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len)
for (off = 0; off < len; off += PMD_SIZE) { for (off = 0; off < len; off += PMD_SIZE) {
/* Walk the guest addr space page table */ /* Walk the guest addr space page table */
table = gmap->table + (((to + off) >> 53) & 0x7ff); table = gmap->table + (((to + off) >> 53) & 0x7ff);
if (*table & _REGION_ENTRY_INV) if (*table & _REGION_ENTRY_INVALID)
goto out; goto out;
table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
table = table + (((to + off) >> 42) & 0x7ff); table = table + (((to + off) >> 42) & 0x7ff);
if (*table & _REGION_ENTRY_INV) if (*table & _REGION_ENTRY_INVALID)
goto out; goto out;
table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
table = table + (((to + off) >> 31) & 0x7ff); table = table + (((to + off) >> 31) & 0x7ff);
if (*table & _REGION_ENTRY_INV) if (*table & _REGION_ENTRY_INVALID)
goto out; goto out;
table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
table = table + (((to + off) >> 20) & 0x7ff); table = table + (((to + off) >> 20) & 0x7ff);
/* Clear segment table entry in guest address space. */ /* Clear segment table entry in guest address space. */
flush |= gmap_unlink_segment(gmap, table); flush |= gmap_unlink_segment(gmap, table);
*table = _SEGMENT_ENTRY_INV; *table = _SEGMENT_ENTRY_INVALID;
} }
out: out:
spin_unlock(&gmap->mm->page_table_lock); spin_unlock(&gmap->mm->page_table_lock);
...@@ -345,17 +345,17 @@ int gmap_map_segment(struct gmap *gmap, unsigned long from, ...@@ -345,17 +345,17 @@ int gmap_map_segment(struct gmap *gmap, unsigned long from,
for (off = 0; off < len; off += PMD_SIZE) { for (off = 0; off < len; off += PMD_SIZE) {
/* Walk the gmap address space page table */ /* Walk the gmap address space page table */
table = gmap->table + (((to + off) >> 53) & 0x7ff); table = gmap->table + (((to + off) >> 53) & 0x7ff);
if ((*table & _REGION_ENTRY_INV) && if ((*table & _REGION_ENTRY_INVALID) &&
gmap_alloc_table(gmap, table, _REGION2_ENTRY_EMPTY)) gmap_alloc_table(gmap, table, _REGION2_ENTRY_EMPTY))
goto out_unmap; goto out_unmap;
table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
table = table + (((to + off) >> 42) & 0x7ff); table = table + (((to + off) >> 42) & 0x7ff);
if ((*table & _REGION_ENTRY_INV) && if ((*table & _REGION_ENTRY_INVALID) &&
gmap_alloc_table(gmap, table, _REGION3_ENTRY_EMPTY)) gmap_alloc_table(gmap, table, _REGION3_ENTRY_EMPTY))
goto out_unmap; goto out_unmap;
table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
table = table + (((to + off) >> 31) & 0x7ff); table = table + (((to + off) >> 31) & 0x7ff);
if ((*table & _REGION_ENTRY_INV) && if ((*table & _REGION_ENTRY_INVALID) &&
gmap_alloc_table(gmap, table, _SEGMENT_ENTRY_EMPTY)) gmap_alloc_table(gmap, table, _SEGMENT_ENTRY_EMPTY))
goto out_unmap; goto out_unmap;
table = (unsigned long *) (*table & _REGION_ENTRY_ORIGIN); table = (unsigned long *) (*table & _REGION_ENTRY_ORIGIN);
...@@ -363,7 +363,8 @@ int gmap_map_segment(struct gmap *gmap, unsigned long from, ...@@ -363,7 +363,8 @@ int gmap_map_segment(struct gmap *gmap, unsigned long from,
/* Store 'from' address in an invalid segment table entry. */ /* Store 'from' address in an invalid segment table entry. */
flush |= gmap_unlink_segment(gmap, table); flush |= gmap_unlink_segment(gmap, table);
*table = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | (from + off); *table = (from + off) | (_SEGMENT_ENTRY_INVALID |
_SEGMENT_ENTRY_PROTECT);
} }
spin_unlock(&gmap->mm->page_table_lock); spin_unlock(&gmap->mm->page_table_lock);
up_read(&gmap->mm->mmap_sem); up_read(&gmap->mm->mmap_sem);
...@@ -384,15 +385,15 @@ static unsigned long *gmap_table_walk(unsigned long address, struct gmap *gmap) ...@@ -384,15 +385,15 @@ static unsigned long *gmap_table_walk(unsigned long address, struct gmap *gmap)
unsigned long *table; unsigned long *table;
table = gmap->table + ((address >> 53) & 0x7ff); table = gmap->table + ((address >> 53) & 0x7ff);
if (unlikely(*table & _REGION_ENTRY_INV)) if (unlikely(*table & _REGION_ENTRY_INVALID))
return ERR_PTR(-EFAULT); return ERR_PTR(-EFAULT);
table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
table = table + ((address >> 42) & 0x7ff); table = table + ((address >> 42) & 0x7ff);
if (unlikely(*table & _REGION_ENTRY_INV)) if (unlikely(*table & _REGION_ENTRY_INVALID))
return ERR_PTR(-EFAULT); return ERR_PTR(-EFAULT);
table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
table = table + ((address >> 31) & 0x7ff); table = table + ((address >> 31) & 0x7ff);
if (unlikely(*table & _REGION_ENTRY_INV)) if (unlikely(*table & _REGION_ENTRY_INVALID))
return ERR_PTR(-EFAULT); return ERR_PTR(-EFAULT);
table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
table = table + ((address >> 20) & 0x7ff); table = table + ((address >> 20) & 0x7ff);
...@@ -422,11 +423,11 @@ unsigned long __gmap_translate(unsigned long address, struct gmap *gmap) ...@@ -422,11 +423,11 @@ unsigned long __gmap_translate(unsigned long address, struct gmap *gmap)
return PTR_ERR(segment_ptr); return PTR_ERR(segment_ptr);
/* Convert the gmap address to an mm address. */ /* Convert the gmap address to an mm address. */
segment = *segment_ptr; segment = *segment_ptr;
if (!(segment & _SEGMENT_ENTRY_INV)) { if (!(segment & _SEGMENT_ENTRY_INVALID)) {
page = pfn_to_page(segment >> PAGE_SHIFT); page = pfn_to_page(segment >> PAGE_SHIFT);
mp = (struct gmap_pgtable *) page->index; mp = (struct gmap_pgtable *) page->index;
return mp->vmaddr | (address & ~PMD_MASK); return mp->vmaddr | (address & ~PMD_MASK);
} else if (segment & _SEGMENT_ENTRY_RO) { } else if (segment & _SEGMENT_ENTRY_PROTECT) {
vmaddr = segment & _SEGMENT_ENTRY_ORIGIN; vmaddr = segment & _SEGMENT_ENTRY_ORIGIN;
return vmaddr | (address & ~PMD_MASK); return vmaddr | (address & ~PMD_MASK);
} }
...@@ -517,8 +518,8 @@ static void gmap_disconnect_pgtable(struct mm_struct *mm, unsigned long *table) ...@@ -517,8 +518,8 @@ static void gmap_disconnect_pgtable(struct mm_struct *mm, unsigned long *table)
page = pfn_to_page(__pa(table) >> PAGE_SHIFT); page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
mp = (struct gmap_pgtable *) page->index; mp = (struct gmap_pgtable *) page->index;
list_for_each_entry_safe(rmap, next, &mp->mapper, list) { list_for_each_entry_safe(rmap, next, &mp->mapper, list) {
*rmap->entry = *rmap->entry = mp->vmaddr | (_SEGMENT_ENTRY_INVALID |
_SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | mp->vmaddr; _SEGMENT_ENTRY_PROTECT);
list_del(&rmap->list); list_del(&rmap->list);
kfree(rmap); kfree(rmap);
flush = 1; flush = 1;
...@@ -545,13 +546,13 @@ unsigned long __gmap_fault(unsigned long address, struct gmap *gmap) ...@@ -545,13 +546,13 @@ unsigned long __gmap_fault(unsigned long address, struct gmap *gmap)
/* Convert the gmap address to an mm address. */ /* Convert the gmap address to an mm address. */
while (1) { while (1) {
segment = *segment_ptr; segment = *segment_ptr;
if (!(segment & _SEGMENT_ENTRY_INV)) { if (!(segment & _SEGMENT_ENTRY_INVALID)) {
/* Page table is present */ /* Page table is present */
page = pfn_to_page(segment >> PAGE_SHIFT); page = pfn_to_page(segment >> PAGE_SHIFT);
mp = (struct gmap_pgtable *) page->index; mp = (struct gmap_pgtable *) page->index;
return mp->vmaddr | (address & ~PMD_MASK); return mp->vmaddr | (address & ~PMD_MASK);
} }
if (!(segment & _SEGMENT_ENTRY_RO)) if (!(segment & _SEGMENT_ENTRY_PROTECT))
/* Nothing mapped in the gmap address space. */ /* Nothing mapped in the gmap address space. */
break; break;
rc = gmap_connect_pgtable(address, segment, segment_ptr, gmap); rc = gmap_connect_pgtable(address, segment, segment_ptr, gmap);
...@@ -586,25 +587,25 @@ void gmap_discard(unsigned long from, unsigned long to, struct gmap *gmap) ...@@ -586,25 +587,25 @@ void gmap_discard(unsigned long from, unsigned long to, struct gmap *gmap)
while (address < to) { while (address < to) {
/* Walk the gmap address space page table */ /* Walk the gmap address space page table */
table = gmap->table + ((address >> 53) & 0x7ff); table = gmap->table + ((address >> 53) & 0x7ff);
if (unlikely(*table & _REGION_ENTRY_INV)) { if (unlikely(*table & _REGION_ENTRY_INVALID)) {
address = (address + PMD_SIZE) & PMD_MASK; address = (address + PMD_SIZE) & PMD_MASK;
continue; continue;
} }
table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
table = table + ((address >> 42) & 0x7ff); table = table + ((address >> 42) & 0x7ff);
if (unlikely(*table & _REGION_ENTRY_INV)) { if (unlikely(*table & _REGION_ENTRY_INVALID)) {
address = (address + PMD_SIZE) & PMD_MASK; address = (address + PMD_SIZE) & PMD_MASK;
continue; continue;
} }
table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
table = table + ((address >> 31) & 0x7ff); table = table + ((address >> 31) & 0x7ff);
if (unlikely(*table & _REGION_ENTRY_INV)) { if (unlikely(*table & _REGION_ENTRY_INVALID)) {
address = (address + PMD_SIZE) & PMD_MASK; address = (address + PMD_SIZE) & PMD_MASK;
continue; continue;
} }
table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
table = table + ((address >> 20) & 0x7ff); table = table + ((address >> 20) & 0x7ff);
if (unlikely(*table & _SEGMENT_ENTRY_INV)) { if (unlikely(*table & _SEGMENT_ENTRY_INVALID)) {
address = (address + PMD_SIZE) & PMD_MASK; address = (address + PMD_SIZE) & PMD_MASK;
continue; continue;
} }
...@@ -687,7 +688,7 @@ int gmap_ipte_notify(struct gmap *gmap, unsigned long start, unsigned long len) ...@@ -687,7 +688,7 @@ int gmap_ipte_notify(struct gmap *gmap, unsigned long start, unsigned long len)
continue; continue;
/* Set notification bit in the pgste of the pte */ /* Set notification bit in the pgste of the pte */
entry = *ptep; entry = *ptep;
if ((pte_val(entry) & (_PAGE_INVALID | _PAGE_RO)) == 0) { if ((pte_val(entry) & (_PAGE_INVALID | _PAGE_PROTECT)) == 0) {
pgste = pgste_get_lock(ptep); pgste = pgste_get_lock(ptep);
pgste_val(pgste) |= PGSTE_IN_BIT; pgste_val(pgste) |= PGSTE_IN_BIT;
pgste_set_unlock(ptep, pgste); pgste_set_unlock(ptep, pgste);
...@@ -752,7 +753,7 @@ static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm, ...@@ -752,7 +753,7 @@ static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm,
page->index = (unsigned long) mp; page->index = (unsigned long) mp;
atomic_set(&page->_mapcount, 3); atomic_set(&page->_mapcount, 3);
table = (unsigned long *) page_to_phys(page); table = (unsigned long *) page_to_phys(page);
clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE/2); clear_table(table, _PAGE_INVALID, PAGE_SIZE/2);
clear_table(table + PTRS_PER_PTE, 0, PAGE_SIZE/2); clear_table(table + PTRS_PER_PTE, 0, PAGE_SIZE/2);
return table; return table;
} }
...@@ -878,7 +879,7 @@ unsigned long *page_table_alloc(struct mm_struct *mm, unsigned long vmaddr) ...@@ -878,7 +879,7 @@ unsigned long *page_table_alloc(struct mm_struct *mm, unsigned long vmaddr)
pgtable_page_ctor(page); pgtable_page_ctor(page);
atomic_set(&page->_mapcount, 1); atomic_set(&page->_mapcount, 1);
table = (unsigned long *) page_to_phys(page); table = (unsigned long *) page_to_phys(page);
clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE); clear_table(table, _PAGE_INVALID, PAGE_SIZE);
spin_lock_bh(&mm->context.list_lock); spin_lock_bh(&mm->context.list_lock);
list_add(&page->lru, &mm->context.pgtable_list); list_add(&page->lru, &mm->context.pgtable_list);
} else { } else {
...@@ -1198,9 +1199,9 @@ pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp) ...@@ -1198,9 +1199,9 @@ pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
list_del(lh); list_del(lh);
} }
ptep = (pte_t *) pgtable; ptep = (pte_t *) pgtable;
pte_val(*ptep) = _PAGE_TYPE_EMPTY; pte_val(*ptep) = _PAGE_INVALID;
ptep++; ptep++;
pte_val(*ptep) = _PAGE_TYPE_EMPTY; pte_val(*ptep) = _PAGE_INVALID;
return pgtable; return pgtable;
} }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
...@@ -69,7 +69,7 @@ static pte_t __ref *vmem_pte_alloc(unsigned long address) ...@@ -69,7 +69,7 @@ static pte_t __ref *vmem_pte_alloc(unsigned long address)
pte = alloc_bootmem(PTRS_PER_PTE * sizeof(pte_t)); pte = alloc_bootmem(PTRS_PER_PTE * sizeof(pte_t));
if (!pte) if (!pte)
return NULL; return NULL;
clear_table((unsigned long *) pte, _PAGE_TYPE_EMPTY, clear_table((unsigned long *) pte, _PAGE_INVALID,
PTRS_PER_PTE * sizeof(pte_t)); PTRS_PER_PTE * sizeof(pte_t));
return pte; return pte;
} }
...@@ -101,7 +101,7 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro) ...@@ -101,7 +101,7 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
!(address & ~PUD_MASK) && (address + PUD_SIZE <= end)) { !(address & ~PUD_MASK) && (address + PUD_SIZE <= end)) {
pud_val(*pu_dir) = __pa(address) | pud_val(*pu_dir) = __pa(address) |
_REGION_ENTRY_TYPE_R3 | _REGION3_ENTRY_LARGE | _REGION_ENTRY_TYPE_R3 | _REGION3_ENTRY_LARGE |
(ro ? _REGION_ENTRY_RO : 0); (ro ? _REGION_ENTRY_PROTECT : 0);
address += PUD_SIZE; address += PUD_SIZE;
continue; continue;
} }
...@@ -118,7 +118,7 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro) ...@@ -118,7 +118,7 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
!(address & ~PMD_MASK) && (address + PMD_SIZE <= end)) { !(address & ~PMD_MASK) && (address + PMD_SIZE <= end)) {
pmd_val(*pm_dir) = __pa(address) | pmd_val(*pm_dir) = __pa(address) |
_SEGMENT_ENTRY | _SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY | _SEGMENT_ENTRY_LARGE |
(ro ? _SEGMENT_ENTRY_RO : 0); (ro ? _SEGMENT_ENTRY_PROTECT : 0);
address += PMD_SIZE; address += PMD_SIZE;
continue; continue;
} }
...@@ -131,7 +131,8 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro) ...@@ -131,7 +131,8 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
} }
pt_dir = pte_offset_kernel(pm_dir, address); pt_dir = pte_offset_kernel(pm_dir, address);
pte_val(*pt_dir) = __pa(address) | (ro ? _PAGE_RO : 0); pte_val(*pt_dir) = __pa(address) |
pgprot_val(ro ? PAGE_KERNEL_RO : PAGE_KERNEL);
address += PAGE_SIZE; address += PAGE_SIZE;
} }
ret = 0; ret = 0;
...@@ -154,7 +155,7 @@ static void vmem_remove_range(unsigned long start, unsigned long size) ...@@ -154,7 +155,7 @@ static void vmem_remove_range(unsigned long start, unsigned long size)
pte_t *pt_dir; pte_t *pt_dir;
pte_t pte; pte_t pte;
pte_val(pte) = _PAGE_TYPE_EMPTY; pte_val(pte) = _PAGE_INVALID;
while (address < end) { while (address < end) {
pg_dir = pgd_offset_k(address); pg_dir = pgd_offset_k(address);
if (pgd_none(*pg_dir)) { if (pgd_none(*pg_dir)) {
...@@ -255,7 +256,8 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node) ...@@ -255,7 +256,8 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
new_page =__pa(vmem_alloc_pages(0)); new_page =__pa(vmem_alloc_pages(0));
if (!new_page) if (!new_page)
goto out; goto out;
pte_val(*pt_dir) = __pa(new_page); pte_val(*pt_dir) =
__pa(new_page) | pgprot_val(PAGE_KERNEL);
} }
address += PAGE_SIZE; address += PAGE_SIZE;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment