Commit d091fcb9 authored by Vineet Gupta

ARC: MMUv4 preps/2 - Reshuffle PTE bits

With the previous commit freeing up PTE bits, reassign them so as to:

- Match the bit to H/w counterpart where possible
  (e.g. MMUv2 GLOBAL/PRESENT; this avoids a shift in create_tlb(); see the
  sketch below)
- Avoid holes in _PAGE_xxx definitions
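For illustration, here is a minimal standalone sketch (not part of the commit)
of what the reshuffle buys on MMUv2: once the software PTE bits sit at the same
positions as their hardware counterparts in TLBPD0, the flags can be OR'ed
straight into PD0 after masking, with no shift. The OLD_*/NEW_* macros and the
pd0_flags_*() helpers are hypothetical names made up for this example; the bit
values are the ones from the diff below.

	#include <stdint.h>
	#include <stdio.h>

	/* Old MMUv2 software layout: GLOBAL/PRESENT sat one bit above the
	 * positions the hardware expects in TLBPD0, hence the ">> 1". */
	#define OLD_PAGE_GLOBAL     (1u << 9)
	#define OLD_PAGE_PRESENT    (1u << 11)
	#define OLD_PTE_BITS_IN_PD0 (OLD_PAGE_GLOBAL | OLD_PAGE_PRESENT)

	/* New layout: software bits match the hardware PD0 positions. */
	#define NEW_PAGE_GLOBAL     (1u << 8)
	#define NEW_PAGE_PRESENT    (1u << 10)
	#define NEW_PTE_BITS_IN_PD0 (NEW_PAGE_GLOBAL | NEW_PAGE_PRESENT)

	/* Hypothetical helpers mirroring the old and new create_tlb() logic */
	static uint32_t pd0_flags_old(uint32_t pte)
	{
		return (pte & OLD_PTE_BITS_IN_PD0) >> 1;  /* mask, then shift */
	}

	static uint32_t pd0_flags_new(uint32_t pte)
	{
		return pte & NEW_PTE_BITS_IN_PD0;         /* mask alone suffices */
	}

	int main(void)
	{
		uint32_t old_pte = OLD_PAGE_GLOBAL | OLD_PAGE_PRESENT;
		uint32_t new_pte = NEW_PAGE_GLOBAL | NEW_PAGE_PRESENT;

		/* Both yield the same hardware flag word, 0x500 */
		printf("old: %#x  new: %#x\n",
		       pd0_flags_old(old_pte), pd0_flags_new(new_pte));
		return 0;
	}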
Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
parent 64b703ef
arch/arc/include/asm/pgtable.h
@@ -60,30 +60,24 @@
 #define _PAGE_EXECUTE       (1<<3)	/* Page has user execute perm (H) */
 #define _PAGE_WRITE         (1<<4)	/* Page has user write perm (H) */
 #define _PAGE_READ          (1<<5)	/* Page has user read perm (H) */
-#define _PAGE_GLOBAL        (1<<9)	/* Page is global (H) */
-#define _PAGE_MODIFIED      (1<<10)	/* Page modified (dirty) (S) */
-#define _PAGE_FILE          (1<<10)	/* page cache/ swap (S) */
-#define _PAGE_PRESENT       (1<<11)	/* TLB entry is valid (H) */
+#define _PAGE_MODIFIED      (1<<6)	/* Page modified (dirty) (S) */
+#define _PAGE_FILE          (1<<7)	/* page cache/ swap (S) */
+#define _PAGE_GLOBAL        (1<<8)	/* Page is global (H) */
+#define _PAGE_PRESENT       (1<<10)	/* TLB entry is valid (H) */
 
 #else	/* MMU v3 onwards */
 
-/* PD1 */
 #define _PAGE_CACHEABLE     (1<<0)	/* Page is cached (H) */
 #define _PAGE_EXECUTE       (1<<1)	/* Page has user execute perm (H) */
 #define _PAGE_WRITE         (1<<2)	/* Page has user write perm (H) */
 #define _PAGE_READ          (1<<3)	/* Page has user read perm (H) */
-#define _PAGE_ACCESSED      (1<<7)	/* Page is accessed (S) */
-
-/* PD0 */
+#define _PAGE_ACCESSED      (1<<4)	/* Page is accessed (S) */
+#define _PAGE_MODIFIED      (1<<5)	/* Page modified (dirty) (S) */
+#define _PAGE_FILE          (1<<6)	/* page cache/ swap (S) */
 #define _PAGE_GLOBAL        (1<<8)	/* Page is global (H) */
 #define _PAGE_PRESENT       (1<<9)	/* TLB entry is valid (H) */
-#define _PAGE_SHARED_CODE   (1<<10)	/* Shared Code page with cmn vaddr
+#define _PAGE_SHARED_CODE   (1<<11)	/* Shared Code page with cmn vaddr
 					   usable for shared TLB entries (H) */
-
-#define _PAGE_MODIFIED      (1<<11)	/* Page modified (dirty) (S) */
-#define _PAGE_FILE          (1<<12)	/* page cache/ swap (S) */
-
-#define _PAGE_SHARED_CODE_H (1<<31)	/* Hardware counterpart of above */
 #endif
 
 /* vmalloc permissions */
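As a quick sanity check on the reshuffled MMUv3 layout (a standalone C11
illustration, not kernel code): the software-managed bits now pack contiguously
into bits 4..6, disjoint from the hardware-defined bits, which is the "avoid
holes" point from the commit message.

	#include <assert.h>

	/* Reshuffled MMUv3 values, copied from the hunk above */
	#define _PAGE_ACCESSED	(1 << 4)	/* (S) */
	#define _PAGE_MODIFIED	(1 << 5)	/* (S) */
	#define _PAGE_FILE	(1 << 6)	/* (S) */
	#define _PAGE_GLOBAL	(1 << 8)	/* (H) */
	#define _PAGE_PRESENT	(1 << 9)	/* (H) */

	/* S-bits form one contiguous run (0x70)... */
	static_assert((_PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_FILE) == 0x70,
		      "S-bits packed without holes");
	/* ...and never clash with the H-bits the MMU interprets */
	static_assert(((_PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_FILE)
		       & (_PAGE_GLOBAL | _PAGE_PRESENT)) == 0,
		      "S-bits disjoint from H-bits");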
arch/arc/mm/tlb.c
@@ -342,7 +342,6 @@ void create_tlb(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
 {
 	unsigned long flags;
 	unsigned int idx, asid_or_sasid, rwx;
-	unsigned long pd0_flags;
 
 	/*
 	 * create_tlb() assumes that current->mm == vma->mm, since
@@ -381,17 +380,13 @@ void create_tlb(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
 	/* update this PTE credentials */
 	pte_val(*ptep) |= (_PAGE_PRESENT | _PAGE_ACCESSED);
 
-	/* Create HW TLB entry Flags (in PD0) from PTE Flags */
-#if (CONFIG_ARC_MMU_VER <= 2)
-	pd0_flags = ((pte_val(*ptep) & PTE_BITS_IN_PD0) >> 1);
-#else
-	pd0_flags = ((pte_val(*ptep) & PTE_BITS_IN_PD0));
-#endif
+	/* Create HW TLB(PD0,PD1) from PTE */
 
 	/* ASID for this task */
 	asid_or_sasid = read_aux_reg(ARC_REG_PID) & 0xff;
 
-	write_aux_reg(ARC_REG_TLBPD0, address | pd0_flags | asid_or_sasid);
+	write_aux_reg(ARC_REG_TLBPD0, address | asid_or_sasid |
+		      (pte_val(*ptep) & PTE_BITS_IN_PD0));
 
 	/*
 	 * ARC MMU provides fully orthogonal access bits for K/U mode,
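For reference, a hedged sketch (not the kernel code itself) of what the
simplified TLBPD0 write composes: the page-aligned virtual address in the upper
bits, the PTE's hardware flag bits already at their PD0 positions, and the
8-bit ASID in the low byte. mk_tlbpd0() is a hypothetical helper, and the
PAGE_MASK value assumes 4 KB pages.

	#include <stdint.h>

	#define PAGE_MASK	(~0xfffu)	/* assuming 4 KB pages */
	#define _PAGE_GLOBAL	(1u << 8)	/* MMUv3 values from above */
	#define _PAGE_PRESENT	(1u << 9)
	#define PTE_BITS_IN_PD0	(_PAGE_GLOBAL | _PAGE_PRESENT)

	/* With PTE bits already at their PD0 positions, the TLB entry
	 * word is a plain OR of vaddr, flags and ASID -- no shifting. */
	static uint32_t mk_tlbpd0(uint32_t vaddr, uint32_t pte, uint32_t asid)
	{
		return (vaddr & PAGE_MASK) | (pte & PTE_BITS_IN_PD0)
		       | (asid & 0xff);
	}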
arch/arc/mm/tlbex.S
@@ -229,9 +229,6 @@ ex_saved_reg1:
 	sr  r3, [ARC_REG_TLBPD1]	; these go in PD1
 
 	and r2, r0, PTE_BITS_IN_PD0	; Extract other PTE flags: (V)alid, (G)lb
-#if (CONFIG_ARC_MMU_VER <= 2)	/* Neednot be done with v3 onwards */
-	lsr r2, r2			; shift PTE flags to match layout in PD0
-#endif
 
 	lr  r3,[ARC_REG_TLBPD0]	; MMU prepares PD0 with vaddr and asid