Commit e5098611 authored by Martin Schwidefsky

s390/mm: cleanup page table definitions

Improve the encoding of the different pte types and the naming of the
page, segment table and region table bits. Due to the different pte
encoding the hugetlbfs primitives need to be adapted as well. To improve
compatibility with common code, make the huge ptes use the encoding of
normal ptes. The conversion between the pte and pmd encoding for a huge
pte is done with set_huge_pte_at and huge_ptep_get.
Overall the code is now easier to understand.
Reviewed-by: Gerald Schaefer <gerald.schaefer@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
parent 416fd0ff
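
For illustration, the stand-alone user-space sketch below models the pte-to-segment-entry bit mapping that the new __pte_to_pmd() helper (added further down in the hugetlbpage.c hunks) performs for present ptes. The constants are placeholder values chosen for this sketch, not the real s390 bit definitions, and only the validity/protection bits are modelled, not the page-frame origin.

/*
 * Illustrative model of the pte -> pmd protection-bit mapping used for
 * huge ptes.  All constants are placeholders picked for this sketch,
 * not the real s390 definitions.  Only the pte_present() branch of the
 * conversion is modelled; the page-frame origin is left out.
 */
#include <stdio.h>

#define _PAGE_PRESENT          0x001UL  /* placeholder */
#define _PAGE_PROTECT          0x200UL  /* placeholder */
#define _PAGE_INVALID          0x400UL  /* placeholder */
#define _SEGMENT_ENTRY_INVALID 0x020UL  /* placeholder */
#define _SEGMENT_ENTRY_PROTECT 0x200UL  /* placeholder */

/* Mirrors the bit logic of __pte_to_pmd() from this patch. */
static unsigned long pte_prot_to_pmd_prot(unsigned long pte)
{
	unsigned long pmd = 0;
	int none, prot;

	if (pte & _PAGE_INVALID)
		pmd |= _SEGMENT_ENTRY_INVALID;
	/* prot-none: present but invalid; read-only: protect bit set */
	none = (pte & _PAGE_PRESENT) && (pte & _PAGE_INVALID);
	prot = (pte & _PAGE_PROTECT) != 0;
	if (prot || none)
		pmd |= _SEGMENT_ENTRY_PROTECT;
	return pmd;
}

int main(void)
{
	/* read-only pte: segment entry gets the protect bit */
	printf("%#lx\n", pte_prot_to_pmd_prot(_PAGE_PRESENT | _PAGE_PROTECT));
	/* read-write pte: no extra segment bits */
	printf("%#lx\n", pte_prot_to_pmd_prot(_PAGE_PRESENT));
	return 0;
}
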
@@ -17,6 +17,9 @@
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pte);
pte_t huge_ptep_get(pte_t *ptep);
pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
unsigned long addr, pte_t *ptep);
/*
* If the arch doesn't supply something else, assume that hugepage
@@ -38,147 +41,75 @@ static inline int prepare_hugepage_range(struct file *file,
int arch_prepare_hugepage(struct page *page);
void arch_release_hugepage(struct page *page);
static inline pte_t huge_pte_wrprotect(pte_t pte)
static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
pte_t *ptep)
{
pte_val(pte) |= _PAGE_RO;
return pte;
pte_val(*ptep) = _SEGMENT_ENTRY_EMPTY;
}
static inline int huge_pte_none(pte_t pte)
static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
unsigned long address, pte_t *ptep)
{
return (pte_val(pte) & _SEGMENT_ENTRY_INV) &&
!(pte_val(pte) & _SEGMENT_ENTRY_RO);
huge_ptep_get_and_clear(vma->vm_mm, address, ptep);
}
static inline pte_t huge_ptep_get(pte_t *ptep)
static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep,
pte_t pte, int dirty)
{
pte_t pte = *ptep;
unsigned long mask;
if (!MACHINE_HAS_HPAGE) {
ptep = (pte_t *) (pte_val(pte) & _SEGMENT_ENTRY_ORIGIN);
if (ptep) {
mask = pte_val(pte) &
(_SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO);
pte = pte_mkhuge(*ptep);
pte_val(pte) |= mask;
int changed = !pte_same(huge_ptep_get(ptep), pte);
if (changed) {
huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
}
}
return pte;
}
static inline void __pmd_csp(pmd_t *pmdp)
{
register unsigned long reg2 asm("2") = pmd_val(*pmdp);
register unsigned long reg3 asm("3") = pmd_val(*pmdp) |
_SEGMENT_ENTRY_INV;
register unsigned long reg4 asm("4") = ((unsigned long) pmdp) + 5;
asm volatile(
" csp %1,%3"
: "=m" (*pmdp)
: "d" (reg2), "d" (reg3), "d" (reg4), "m" (*pmdp) : "cc");
}
static inline void huge_ptep_invalidate(struct mm_struct *mm,
unsigned long address, pte_t *ptep)
{
pmd_t *pmdp = (pmd_t *) ptep;
if (MACHINE_HAS_IDTE)
__pmd_idte(address, pmdp);
else
__pmd_csp(pmdp);
pmd_val(*pmdp) = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY;
return changed;
}
static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
{
pte_t pte = huge_ptep_get(ptep);
huge_ptep_invalidate(mm, addr, ptep);
return pte;
pte_t pte = huge_ptep_get_and_clear(mm, addr, ptep);
set_huge_pte_at(mm, addr, ptep, pte_wrprotect(pte));
}
#define huge_ptep_set_access_flags(__vma, __addr, __ptep, __entry, __dirty) \
({ \
int __changed = !pte_same(huge_ptep_get(__ptep), __entry); \
if (__changed) { \
huge_ptep_invalidate((__vma)->vm_mm, __addr, __ptep); \
set_huge_pte_at((__vma)->vm_mm, __addr, __ptep, __entry); \
} \
__changed; \
})
#define huge_ptep_set_wrprotect(__mm, __addr, __ptep) \
({ \
pte_t __pte = huge_ptep_get(__ptep); \
if (huge_pte_write(__pte)) { \
huge_ptep_invalidate(__mm, __addr, __ptep); \
set_huge_pte_at(__mm, __addr, __ptep, \
huge_pte_wrprotect(__pte)); \
} \
})
static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
unsigned long address, pte_t *ptep)
static inline pte_t mk_huge_pte(struct page *page, pgprot_t pgprot)
{
huge_ptep_invalidate(vma->vm_mm, address, ptep);
return mk_pte(page, pgprot);
}
static inline pte_t mk_huge_pte(struct page *page, pgprot_t pgprot)
static inline int huge_pte_none(pte_t pte)
{
pte_t pte;
pmd_t pmd;
pmd = mk_pmd_phys(page_to_phys(page), pgprot);
pte_val(pte) = pmd_val(pmd);
return pte;
return pte_none(pte);
}
static inline int huge_pte_write(pte_t pte)
{
pmd_t pmd;
pmd_val(pmd) = pte_val(pte);
return pmd_write(pmd);
return pte_write(pte);
}
static inline int huge_pte_dirty(pte_t pte)
{
/* No dirty bit in the segment table entry. */
return 0;
return pte_dirty(pte);
}
static inline pte_t huge_pte_mkwrite(pte_t pte)
{
pmd_t pmd;
pmd_val(pmd) = pte_val(pte);
pte_val(pte) = pmd_val(pmd_mkwrite(pmd));
return pte;
return pte_mkwrite(pte);
}
static inline pte_t huge_pte_mkdirty(pte_t pte)
{
/* No dirty bit in the segment table entry. */
return pte;
return pte_mkdirty(pte);
}
static inline pte_t huge_pte_modify(pte_t pte, pgprot_t newprot)
static inline pte_t huge_pte_wrprotect(pte_t pte)
{
pmd_t pmd;
pmd_val(pmd) = pte_val(pte);
pte_val(pte) = pmd_val(pmd_modify(pmd, newprot));
return pte;
return pte_wrprotect(pte);
}
static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
pte_t *ptep)
static inline pte_t huge_pte_modify(pte_t pte, pgprot_t newprot)
{
pmd_clear((pmd_t *) ptep);
return pte_modify(pte, newprot);
}
#endif /* _ASM_S390_HUGETLB_H */
This diff is collapsed.
@@ -113,11 +113,11 @@ int vdso_alloc_per_cpu(struct _lowcore *lowcore)
clear_table((unsigned long *) segment_table, _SEGMENT_ENTRY_EMPTY,
PAGE_SIZE << SEGMENT_ORDER);
clear_table((unsigned long *) page_table, _PAGE_TYPE_EMPTY,
clear_table((unsigned long *) page_table, _PAGE_INVALID,
256*sizeof(unsigned long));
*(unsigned long *) segment_table = _SEGMENT_ENTRY + page_table;
*(unsigned long *) page_table = _PAGE_RO + page_frame;
*(unsigned long *) page_table = _PAGE_PROTECT + page_frame;
psal = (u32 *) (page_table + 256*sizeof(unsigned long));
aste = psal + 32;
@@ -86,28 +86,28 @@ static unsigned long follow_table(struct mm_struct *mm,
switch (mm->context.asce_bits & _ASCE_TYPE_MASK) {
case _ASCE_TYPE_REGION1:
table = table + ((address >> 53) & 0x7ff);
if (unlikely(*table & _REGION_ENTRY_INV))
if (unlikely(*table & _REGION_ENTRY_INVALID))
return -0x39UL;
table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
/* fallthrough */
case _ASCE_TYPE_REGION2:
table = table + ((address >> 42) & 0x7ff);
if (unlikely(*table & _REGION_ENTRY_INV))
if (unlikely(*table & _REGION_ENTRY_INVALID))
return -0x3aUL;
table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
/* fallthrough */
case _ASCE_TYPE_REGION3:
table = table + ((address >> 31) & 0x7ff);
if (unlikely(*table & _REGION_ENTRY_INV))
if (unlikely(*table & _REGION_ENTRY_INVALID))
return -0x3bUL;
table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
/* fallthrough */
case _ASCE_TYPE_SEGMENT:
table = table + ((address >> 20) & 0x7ff);
if (unlikely(*table & _SEGMENT_ENTRY_INV))
if (unlikely(*table & _SEGMENT_ENTRY_INVALID))
return -0x10UL;
if (unlikely(*table & _SEGMENT_ENTRY_LARGE)) {
if (write && (*table & _SEGMENT_ENTRY_RO))
if (write && (*table & _SEGMENT_ENTRY_PROTECT))
return -0x04UL;
return (*table & _SEGMENT_ENTRY_ORIGIN_LARGE) +
(address & ~_SEGMENT_ENTRY_ORIGIN_LARGE);
@@ -117,7 +117,7 @@ static unsigned long follow_table(struct mm_struct *mm,
table = table + ((address >> 12) & 0xff);
if (unlikely(*table & _PAGE_INVALID))
return -0x11UL;
if (write && (*table & _PAGE_RO))
if (write && (*table & _PAGE_PROTECT))
return -0x04UL;
return (*table & PAGE_MASK) + (address & ~PAGE_MASK);
}
@@ -130,13 +130,13 @@ static unsigned long follow_table(struct mm_struct *mm,
unsigned long *table = (unsigned long *)__pa(mm->pgd);
table = table + ((address >> 20) & 0x7ff);
if (unlikely(*table & _SEGMENT_ENTRY_INV))
if (unlikely(*table & _SEGMENT_ENTRY_INVALID))
return -0x10UL;
table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
table = table + ((address >> 12) & 0xff);
if (unlikely(*table & _PAGE_INVALID))
return -0x11UL;
if (write && (*table & _PAGE_RO))
if (write && (*table & _PAGE_PROTECT))
return -0x04UL;
return (*table & PAGE_MASK) + (address & ~PAGE_MASK);
}
@@ -53,7 +53,7 @@ static void print_prot(struct seq_file *m, unsigned int pr, int level)
seq_printf(m, "I\n");
return;
}
seq_printf(m, "%s", pr & _PAGE_RO ? "RO " : "RW ");
seq_printf(m, "%s", pr & _PAGE_PROTECT ? "RO " : "RW ");
seq_printf(m, "%s", pr & _PAGE_CO ? "CO " : " ");
seq_putc(m, '\n');
}
@@ -105,12 +105,12 @@ static void note_page(struct seq_file *m, struct pg_state *st,
}
/*
* The actual page table walker functions. In order to keep the implementation
* of print_prot() short, we only check and pass _PAGE_INVALID and _PAGE_RO
* flags to note_page() if a region, segment or page table entry is invalid or
* read-only.
* After all it's just a hint that the current level being walked contains an
* invalid or read-only entry.
* The actual page table walker functions. In order to keep the
* implementation of print_prot() short, we only check and pass
* _PAGE_INVALID and _PAGE_PROTECT flags to note_page() if a region,
* segment or page table entry is invalid or read-only.
* After all it's just a hint that the current level being walked
* contains an invalid or read-only entry.
*/
static void walk_pte_level(struct seq_file *m, struct pg_state *st,
pmd_t *pmd, unsigned long addr)
@@ -122,14 +122,14 @@ static void walk_pte_level(struct seq_file *m, struct pg_state *st,
for (i = 0; i < PTRS_PER_PTE && addr < max_addr; i++) {
st->current_address = addr;
pte = pte_offset_kernel(pmd, addr);
prot = pte_val(*pte) & (_PAGE_RO | _PAGE_INVALID);
prot = pte_val(*pte) & (_PAGE_PROTECT | _PAGE_INVALID);
note_page(m, st, prot, 4);
addr += PAGE_SIZE;
}
}
#ifdef CONFIG_64BIT
#define _PMD_PROT_MASK (_SEGMENT_ENTRY_RO | _SEGMENT_ENTRY_CO)
#define _PMD_PROT_MASK (_SEGMENT_ENTRY_PROTECT | _SEGMENT_ENTRY_CO)
#else
#define _PMD_PROT_MASK 0
#endif
@@ -24,7 +24,7 @@ static inline int gup_pte_range(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
pte_t *ptep, pte;
struct page *page;
mask = (write ? _PAGE_RO : 0) | _PAGE_INVALID | _PAGE_SPECIAL;
mask = (write ? _PAGE_PROTECT : 0) | _PAGE_INVALID | _PAGE_SPECIAL;
ptep = ((pte_t *) pmd_deref(pmd)) + pte_index(addr);
do {
@@ -55,8 +55,8 @@ static inline int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
struct page *head, *page, *tail;
int refs;
result = write ? 0 : _SEGMENT_ENTRY_RO;
mask = result | _SEGMENT_ENTRY_INV;
result = write ? 0 : _SEGMENT_ENTRY_PROTECT;
mask = result | _SEGMENT_ENTRY_INVALID;
if ((pmd_val(pmd) & mask) != result)
return 0;
VM_BUG_ON(!pfn_valid(pmd_val(pmd) >> PAGE_SHIFT));
@@ -8,21 +8,107 @@
#include <linux/mm.h>
#include <linux/hugetlb.h>
static inline pmd_t __pte_to_pmd(pte_t pte)
{
int none, prot;
pmd_t pmd;
/*
* Convert encoding pte bits pmd bits
* .IR.....wdtp ..R...I.....
* empty .10.....0000 -> ..0...1.....
* prot-none, clean .11.....0001 -> ..1...1.....
* prot-none, dirty .10.....0101 -> ..1...1.....
* read-only, clean .01.....0001 -> ..1...0.....
* read-only, dirty .01.....0101 -> ..1...0.....
* read-write, clean .01.....1001 -> ..0...0.....
* read-write, dirty .00.....1101 -> ..0...0.....
* Huge ptes are dirty by definition, a clean pte is made dirty
* by the conversion.
*/
if (pte_present(pte)) {
pmd_val(pmd) = pte_val(pte) & PAGE_MASK;
if (pte_val(pte) & _PAGE_INVALID)
pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
none = (pte_val(pte) & _PAGE_PRESENT) &&
(pte_val(pte) & _PAGE_INVALID);
prot = (pte_val(pte) & _PAGE_PROTECT);
if (prot || none)
pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
} else
pmd_val(pmd) = _SEGMENT_ENTRY_INVALID;
return pmd;
}
static inline pte_t __pmd_to_pte(pmd_t pmd)
{
pte_t pte;
/*
* Convert encoding pmd bits pte bits
* ..R...I..... .IR.....wdtp
* empty ..0...1..... -> .10.....0000
* prot-none, young ..1...1..... -> .10.....0101
* read-only, young ..1...0..... -> .01.....0101
* read-write, young ..0...0..... -> .00.....1101
* Huge ptes are dirty by definition
*/
if (pmd_present(pmd)) {
pte_val(pte) = _PAGE_PRESENT | _PAGE_LARGE | _PAGE_DIRTY |
(pmd_val(pmd) & PAGE_MASK);
if (pmd_val(pmd) & _SEGMENT_ENTRY_INVALID)
pte_val(pte) |= _PAGE_INVALID;
else {
if (pmd_val(pmd) & _SEGMENT_ENTRY_PROTECT)
pte_val(pte) |= _PAGE_PROTECT;
else
pte_val(pte) |= _PAGE_WRITE;
}
} else
pte_val(pte) = _PAGE_INVALID;
return pte;
}
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *pteptr, pte_t pteval)
pte_t *ptep, pte_t pte)
{
pmd_t *pmdp = (pmd_t *) pteptr;
unsigned long mask;
pmd_t pmd;
pmd = __pte_to_pmd(pte);
if (!MACHINE_HAS_HPAGE) {
pteptr = (pte_t *) pte_page(pteval)[1].index;
mask = pte_val(pteval) &
(_SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO);
pte_val(pteval) = (_SEGMENT_ENTRY + __pa(pteptr)) | mask;
pmd_val(pmd) &= ~_SEGMENT_ENTRY_ORIGIN;
pmd_val(pmd) |= pte_page(pte)[1].index;
} else
pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_CO;
*(pmd_t *) ptep = pmd;
}
pte_t huge_ptep_get(pte_t *ptep)
{
unsigned long origin;
pmd_t pmd;
pmd = *(pmd_t *) ptep;
if (!MACHINE_HAS_HPAGE && pmd_present(pmd)) {
origin = pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN;
pmd_val(pmd) &= ~_SEGMENT_ENTRY_ORIGIN;
pmd_val(pmd) |= *(unsigned long *) origin;
}
return __pmd_to_pte(pmd);
}
pmd_val(*pmdp) = pte_val(pteval);
pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
{
pmd_t *pmdp = (pmd_t *) ptep;
pte_t pte = huge_ptep_get(ptep);
if (MACHINE_HAS_IDTE)
__pmd_idte(addr, pmdp);
else
__pmd_csp(pmdp);
pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
return pte;
}
int arch_prepare_hugepage(struct page *page)
@@ -58,7 +144,7 @@ void arch_release_hugepage(struct page *page)
ptep = (pte_t *) page[1].index;
if (!ptep)
return;
clear_table((unsigned long *) ptep, _PAGE_TYPE_EMPTY,
clear_table((unsigned long *) ptep, _PAGE_INVALID,
PTRS_PER_PTE * sizeof(pte_t));
page_table_free(&init_mm, (unsigned long *) ptep);
page[1].index = 0;
@@ -118,7 +118,7 @@ void kernel_map_pages(struct page *page, int numpages, int enable)
pte = pte_offset_kernel(pmd, address);
if (!enable) {
__ptep_ipte(address, pte);
pte_val(*pte) = _PAGE_TYPE_EMPTY;
pte_val(*pte) = _PAGE_INVALID;
continue;
}
pte_val(*pte) = __pa(address);
@@ -161,7 +161,7 @@ static int gmap_unlink_segment(struct gmap *gmap, unsigned long *table)
struct gmap_rmap *rmap;
struct page *page;
if (*table & _SEGMENT_ENTRY_INV)
if (*table & _SEGMENT_ENTRY_INVALID)
return 0;
page = pfn_to_page(*table >> PAGE_SHIFT);
mp = (struct gmap_pgtable *) page->index;
@@ -172,7 +172,7 @@ static int gmap_unlink_segment(struct gmap *gmap, unsigned long *table)
kfree(rmap);
break;
}
*table = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | mp->vmaddr;
*table = mp->vmaddr | _SEGMENT_ENTRY_INVALID | _SEGMENT_ENTRY_PROTECT;
return 1;
}
@@ -258,7 +258,7 @@ static int gmap_alloc_table(struct gmap *gmap,
return -ENOMEM;
new = (unsigned long *) page_to_phys(page);
crst_table_init(new, init);
if (*table & _REGION_ENTRY_INV) {
if (*table & _REGION_ENTRY_INVALID) {
list_add(&page->lru, &gmap->crst_list);
*table = (unsigned long) new | _REGION_ENTRY_LENGTH |
(*table & _REGION_ENTRY_TYPE_MASK);
@@ -292,22 +292,22 @@ int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len)
for (off = 0; off < len; off += PMD_SIZE) {
/* Walk the guest addr space page table */
table = gmap->table + (((to + off) >> 53) & 0x7ff);
if (*table & _REGION_ENTRY_INV)
if (*table & _REGION_ENTRY_INVALID)
goto out;
table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
table = table + (((to + off) >> 42) & 0x7ff);
if (*table & _REGION_ENTRY_INV)
if (*table & _REGION_ENTRY_INVALID)
goto out;
table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
table = table + (((to + off) >> 31) & 0x7ff);
if (*table & _REGION_ENTRY_INV)
if (*table & _REGION_ENTRY_INVALID)
goto out;
table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
table = table + (((to + off) >> 20) & 0x7ff);
/* Clear segment table entry in guest address space. */
flush |= gmap_unlink_segment(gmap, table);
*table = _SEGMENT_ENTRY_INV;
*table = _SEGMENT_ENTRY_INVALID;
}
out:
spin_unlock(&gmap->mm->page_table_lock);
@@ -345,17 +345,17 @@ int gmap_map_segment(struct gmap *gmap, unsigned long from,
for (off = 0; off < len; off += PMD_SIZE) {
/* Walk the gmap address space page table */
table = gmap->table + (((to + off) >> 53) & 0x7ff);
if ((*table & _REGION_ENTRY_INV) &&
if ((*table & _REGION_ENTRY_INVALID) &&
gmap_alloc_table(gmap, table, _REGION2_ENTRY_EMPTY))
goto out_unmap;
table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
table = table + (((to + off) >> 42) & 0x7ff);
if ((*table & _REGION_ENTRY_INV) &&
if ((*table & _REGION_ENTRY_INVALID) &&
gmap_alloc_table(gmap, table, _REGION3_ENTRY_EMPTY))
goto out_unmap;
table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
table = table + (((to + off) >> 31) & 0x7ff);
if ((*table & _REGION_ENTRY_INV) &&
if ((*table & _REGION_ENTRY_INVALID) &&
gmap_alloc_table(gmap, table, _SEGMENT_ENTRY_EMPTY))
goto out_unmap;
table = (unsigned long *) (*table & _REGION_ENTRY_ORIGIN);
@@ -363,7 +363,8 @@ int gmap_map_segment(struct gmap *gmap, unsigned long from,
/* Store 'from' address in an invalid segment table entry. */
flush |= gmap_unlink_segment(gmap, table);
*table = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | (from + off);
*table = (from + off) | (_SEGMENT_ENTRY_INVALID |
_SEGMENT_ENTRY_PROTECT);
}
spin_unlock(&gmap->mm->page_table_lock);
up_read(&gmap->mm->mmap_sem);
@@ -384,15 +385,15 @@ static unsigned long *gmap_table_walk(unsigned long address, struct gmap *gmap)
unsigned long *table;
table = gmap->table + ((address >> 53) & 0x7ff);
if (unlikely(*table & _REGION_ENTRY_INV))
if (unlikely(*table & _REGION_ENTRY_INVALID))
return ERR_PTR(-EFAULT);
table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
table = table + ((address >> 42) & 0x7ff);
if (unlikely(*table & _REGION_ENTRY_INV))
if (unlikely(*table & _REGION_ENTRY_INVALID))
return ERR_PTR(-EFAULT);
table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
table = table + ((address >> 31) & 0x7ff);
if (unlikely(*table & _REGION_ENTRY_INV))
if (unlikely(*table & _REGION_ENTRY_INVALID))
return ERR_PTR(-EFAULT);
table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
table = table + ((address >> 20) & 0x7ff);
@@ -422,11 +423,11 @@ unsigned long __gmap_translate(unsigned long address, struct gmap *gmap)
return PTR_ERR(segment_ptr);
/* Convert the gmap address to an mm address. */
segment = *segment_ptr;
if (!(segment & _SEGMENT_ENTRY_INV)) {
if (!(segment & _SEGMENT_ENTRY_INVALID)) {
page = pfn_to_page(segment >> PAGE_SHIFT);
mp = (struct gmap_pgtable *) page->index;
return mp->vmaddr | (address & ~PMD_MASK);
} else if (segment & _SEGMENT_ENTRY_RO) {
} else if (segment & _SEGMENT_ENTRY_PROTECT) {
vmaddr = segment & _SEGMENT_ENTRY_ORIGIN;
return vmaddr | (address & ~PMD_MASK);
}
@@ -517,8 +518,8 @@ static void gmap_disconnect_pgtable(struct mm_struct *mm, unsigned long *table)
page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
mp = (struct gmap_pgtable *) page->index;
list_for_each_entry_safe(rmap, next, &mp->mapper, list) {
*rmap->entry =
_SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | mp->vmaddr;
*rmap->entry = mp->vmaddr | (_SEGMENT_ENTRY_INVALID |
_SEGMENT_ENTRY_PROTECT);
list_del(&rmap->list);
kfree(rmap);
flush = 1;
@@ -545,13 +546,13 @@ unsigned long __gmap_fault(unsigned long address, struct gmap *gmap)
/* Convert the gmap address to an mm address. */
while (1) {
segment = *segment_ptr;
if (!(segment & _SEGMENT_ENTRY_INV)) {
if (!(segment & _SEGMENT_ENTRY_INVALID)) {
/* Page table is present */
page = pfn_to_page(segment >> PAGE_SHIFT);
mp = (struct gmap_pgtable *) page->index;
return mp->vmaddr | (address & ~PMD_MASK);
}
if (!(segment & _SEGMENT_ENTRY_RO))
if (!(segment & _SEGMENT_ENTRY_PROTECT))
/* Nothing mapped in the gmap address space. */
break;
rc = gmap_connect_pgtable(address, segment, segment_ptr, gmap);
@@ -586,25 +587,25 @@ void gmap_discard(unsigned long from, unsigned long to, struct gmap *gmap)
while (address < to) {
/* Walk the gmap address space page table */
table = gmap->table + ((address >> 53) & 0x7ff);
if (unlikely(*table & _REGION_ENTRY_INV)) {
if (unlikely(*table & _REGION_ENTRY_INVALID)) {
address = (address + PMD_SIZE) & PMD_MASK;
continue;
}
table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
table = table + ((address >> 42) & 0x7ff);
if (unlikely(*table & _REGION_ENTRY_INV)) {
if (unlikely(*table & _REGION_ENTRY_INVALID)) {
address = (address + PMD_SIZE) & PMD_MASK;
continue;
}
table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
table = table + ((address >> 31) & 0x7ff);
if (unlikely(*table & _REGION_ENTRY_INV)) {
if (unlikely(*table & _REGION_ENTRY_INVALID)) {
address = (address + PMD_SIZE) & PMD_MASK;
continue;
}
table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
table = table + ((address >> 20) & 0x7ff);
if (unlikely(*table & _SEGMENT_ENTRY_INV)) {
if (unlikely(*table & _SEGMENT_ENTRY_INVALID)) {
address = (address + PMD_SIZE) & PMD_MASK;
continue;
}
@@ -687,7 +688,7 @@ int gmap_ipte_notify(struct gmap *gmap, unsigned long start, unsigned long len)
continue;
/* Set notification bit in the pgste of the pte */
entry = *ptep;
if ((pte_val(entry) & (_PAGE_INVALID | _PAGE_RO)) == 0) {
if ((pte_val(entry) & (_PAGE_INVALID | _PAGE_PROTECT)) == 0) {
pgste = pgste_get_lock(ptep);
pgste_val(pgste) |= PGSTE_IN_BIT;
pgste_set_unlock(ptep, pgste);
@@ -752,7 +753,7 @@ static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm,
page->index = (unsigned long) mp;
atomic_set(&page->_mapcount, 3);
table = (unsigned long *) page_to_phys(page);
clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE/2);
clear_table(table, _PAGE_INVALID, PAGE_SIZE/2);
clear_table(table + PTRS_PER_PTE, 0, PAGE_SIZE/2);
return table;
}
@@ -878,7 +879,7 @@ unsigned long *page_table_alloc(struct mm_struct *mm, unsigned long vmaddr)
pgtable_page_ctor(page);
atomic_set(&page->_mapcount, 1);
table = (unsigned long *) page_to_phys(page);
clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE);
clear_table(table, _PAGE_INVALID, PAGE_SIZE);
spin_lock_bh(&mm->context.list_lock);
list_add(&page->lru, &mm->context.pgtable_list);
} else {
@@ -1198,9 +1199,9 @@ pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
list_del(lh);
}
ptep = (pte_t *) pgtable;
pte_val(*ptep) = _PAGE_TYPE_EMPTY;
pte_val(*ptep) = _PAGE_INVALID;
ptep++;
pte_val(*ptep) = _PAGE_TYPE_EMPTY;
pte_val(*ptep) = _PAGE_INVALID;
return pgtable;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
@@ -69,7 +69,7 @@ static pte_t __ref *vmem_pte_alloc(unsigned long address)
pte = alloc_bootmem(PTRS_PER_PTE * sizeof(pte_t));
if (!pte)
return NULL;
clear_table((unsigned long *) pte, _PAGE_TYPE_EMPTY,
clear_table((unsigned long *) pte, _PAGE_INVALID,
PTRS_PER_PTE * sizeof(pte_t));
return pte;
}
@@ -101,7 +101,7 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
!(address & ~PUD_MASK) && (address + PUD_SIZE <= end)) {
pud_val(*pu_dir) = __pa(address) |
_REGION_ENTRY_TYPE_R3 | _REGION3_ENTRY_LARGE |
(ro ? _REGION_ENTRY_RO : 0);
(ro ? _REGION_ENTRY_PROTECT : 0);
address += PUD_SIZE;
continue;
}
@@ -118,7 +118,7 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
!(address & ~PMD_MASK) && (address + PMD_SIZE <= end)) {
pmd_val(*pm_dir) = __pa(address) |
_SEGMENT_ENTRY | _SEGMENT_ENTRY_LARGE |
(ro ? _SEGMENT_ENTRY_RO : 0);
(ro ? _SEGMENT_ENTRY_PROTECT : 0);
address += PMD_SIZE;
continue;
}
@@ -131,7 +131,8 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
}
pt_dir = pte_offset_kernel(pm_dir, address);
pte_val(*pt_dir) = __pa(address) | (ro ? _PAGE_RO : 0);
pte_val(*pt_dir) = __pa(address) |
pgprot_val(ro ? PAGE_KERNEL_RO : PAGE_KERNEL);
address += PAGE_SIZE;
}
ret = 0;
@@ -154,7 +155,7 @@ static void vmem_remove_range(unsigned long start, unsigned long size)
pte_t *pt_dir;
pte_t pte;
pte_val(pte) = _PAGE_TYPE_EMPTY;
pte_val(pte) = _PAGE_INVALID;
while (address < end) {
pg_dir = pgd_offset_k(address);
if (pgd_none(*pg_dir)) {
@@ -255,7 +256,8 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
new_page =__pa(vmem_alloc_pages(0));
if (!new_page)
goto out;
pte_val(*pt_dir) = __pa(new_page);
pte_val(*pt_dir) =
__pa(new_page) | pgprot_val(PAGE_KERNEL);
}
address += PAGE_SIZE;
}