Commit c0b96cf6 authored by Paul Mundt

sh: Provide _PAGE_SPECIAL for 32-bit.

Allocate one of the unused PTE bits for _PAGE_SPECIAL directly. This is
prep work for fast gup and the zero page revival.
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
parent fc6cca39
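As background on what the new bit buys: a fast-gup style page-table walker tests pte_special() and refuses to take a page reference on such mappings (the revived zero page being the classic case), falling back to the slow path instead. Below is a minimal user-space C sketch of that check, not kernel code; the struct layout, the sample PTE value, and the main() scaffolding are assumptions, and only the bit value and the pte_special() expression mirror the diff that follows.

    #include <stdint.h>
    #include <stdio.h>

    #define _PAGE_SPECIAL	0x800	/* software: special page (PTE bit 11) */

    /* Simplified stand-in for the 32-bit sh pte_t: only the low word matters here. */
    typedef struct { uint32_t pte_low; } pte_t;

    /* Mirrors the pte_special() test added in the diff below. */
    static inline int pte_special(pte_t pte)
    {
    	return pte.pte_low & _PAGE_SPECIAL;
    }

    int main(void)
    {
    	/* Pretend the zero page was mapped with the special bit set. */
    	pte_t pte = { .pte_low = 0x1e3 | _PAGE_SPECIAL };

    	/*
    	 * A fast-gup style walker checks the bit and falls back to the
    	 * slow path instead of taking a page reference.
    	 */
    	if (pte_special(pte))
    		printf("special PTE, fall back to slow path\n");

    	return 0;
    }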
@@ -20,7 +20,7 @@
  * - Bit 9 is reserved by everyone and used by _PAGE_PROTNONE.
  *
  * - Bits 10 and 11 are low bits of the PPN that are reserved on >= 4K pages.
- *   Bit 10 is used for _PAGE_ACCESSED, bit 11 remains unused.
+ *   Bit 10 is used for _PAGE_ACCESSED, and bit 11 is used for _PAGE_SPECIAL.
  *
  * - On 29 bit platforms, bits 31 to 29 are used for the space attributes
  *   and timing control which (together with bit 0) are moved into the
@@ -52,6 +52,7 @@
 #define _PAGE_PROTNONE	0x200	/* software: if not present  */
 #define _PAGE_ACCESSED	0x400	/* software: page referenced */
 #define _PAGE_FILE	_PAGE_WT	/* software: pagecache or swap? */
+#define _PAGE_SPECIAL	0x800	/* software: special page */
 
 #define _PAGE_SZ_MASK	(_PAGE_SZ0 | _PAGE_SZ1)
 #define _PAGE_PR_MASK	(_PAGE_RW | _PAGE_USER)
@@ -148,8 +149,12 @@
 # define _PAGE_SZHUGE	(_PAGE_FLAGS_HARD)
 #endif
 
+/*
+ * Mask of bits that are to be preserved accross pgprot changes.
+ */
 #define _PAGE_CHG_MASK \
-	(PTE_MASK | _PAGE_ACCESSED | _PAGE_CACHABLE | _PAGE_DIRTY)
+	(PTE_MASK | _PAGE_ACCESSED | _PAGE_CACHABLE | \
+	 _PAGE_DIRTY | _PAGE_SPECIAL)
 
 #ifndef __ASSEMBLY__
 
@@ -328,7 +333,7 @@ static inline void set_pte(pte_t *ptep, pte_t pte)
 #define pte_dirty(pte)		((pte).pte_low & _PAGE_DIRTY)
 #define pte_young(pte)		((pte).pte_low & _PAGE_ACCESSED)
 #define pte_file(pte)		((pte).pte_low & _PAGE_FILE)
-#define pte_special(pte)	(0)
+#define pte_special(pte)	((pte).pte_low & _PAGE_SPECIAL)
 
 #ifdef CONFIG_X2TLB
 #define pte_write(pte)		((pte).pte_high & _PAGE_EXT_USER_WRITE)
@@ -358,8 +363,9 @@ PTE_BIT_FUNC(low, mkclean, &= ~_PAGE_DIRTY);
 PTE_BIT_FUNC(low, mkdirty, |= _PAGE_DIRTY);
 PTE_BIT_FUNC(low, mkold, &= ~_PAGE_ACCESSED);
 PTE_BIT_FUNC(low, mkyoung, |= _PAGE_ACCESSED);
+PTE_BIT_FUNC(low, mkspecial, |= _PAGE_SPECIAL);
 
-static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
+#define __HAVE_ARCH_PTE_SPECIAL
 
 /*
  * Macro and implementation to make a page protection as uncachable.
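Note that the setter is generated through the header's existing PTE_BIT_FUNC() macro rather than open-coded, and defining __HAVE_ARCH_PTE_SPECIAL advertises to generic mm code that this architecture genuinely tracks the special bit (the old stub pte_mkspecial() simply dropped it). A rough, self-contained sketch of how that generator pattern expands; the macro body shown here is an assumption modelled on the 32-bit sh header, not copied from this diff:

    #include <stdint.h>

    #define _PAGE_SPECIAL	0x800

    /* Simplified stand-in for the 32-bit sh pte_t. */
    typedef struct { uint32_t pte_low; } pte_t;

    /*
     * Assumed model of the PTE_BIT_FUNC() generator: it token-pastes the
     * PTE field name and the operation into a small pte_mk*() helper.
     */
    #define PTE_BIT_FUNC(h, fn, op) \
    static inline pte_t pte_##fn(pte_t pte) { pte.pte_##h op; return pte; }

    /* The line added by this commit then expands to roughly:
     *
     *   static inline pte_t pte_mkspecial(pte_t pte)
     *   { pte.pte_low |= _PAGE_SPECIAL; return pte; }
     */
    PTE_BIT_FUNC(low, mkspecial, |= _PAGE_SPECIAL)

    int main(void)
    {
    	pte_t pte = { .pte_low = 0 };

    	pte = pte_mkspecial(pte);

    	return !(pte.pte_low & _PAGE_SPECIAL);	/* exits 0 when the bit is set */
    }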