Commit 1bc54c03 authored by Benjamin Herrenschmidt's avatar Benjamin Herrenschmidt Committed by Josh Boyer

powerpc: rework 4xx PTE access and TLB miss

This is some preliminary work to improve TLB management on SW loaded
TLB powerpc platforms. This introduces support for non-atomic PTE
operations in pgtable-ppc32.h and removes write back to the PTE from
the TLB miss handlers. In addition, the DSI interrupt code no longer
tries to fixup write permission, this is left to generic code, and
_PAGE_HWWRITE is gone.
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Josh Boyer <jwboyer@linux.vnet.ibm.com>
parent beae4c03
This diff is collapsed.
...@@ -340,6 +340,14 @@ ...@@ -340,6 +340,14 @@
addi r3,r1,STACK_FRAME_OVERHEAD; \ addi r3,r1,STACK_FRAME_OVERHEAD; \
EXC_XFER_TEMPLATE(DebugException, 0x2002, (MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), NOCOPY, crit_transfer_to_handler, ret_from_crit_exc) EXC_XFER_TEMPLATE(DebugException, 0x2002, (MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), NOCOPY, crit_transfer_to_handler, ret_from_crit_exc)
/*
 * Common 4xx data storage (DSI) exception entry.
 *
 * Saves the ESR into the exception frame and passes the faulting
 * address (DEAR) in r4 to the generic handle_page_fault() C code.
 * NOTE(review): per this commit, no write-permission fixup is
 * attempted here any more — that is left to the generic mm code.
 */
#define DATA_STORAGE_EXCEPTION \
START_EXCEPTION(DataStorage) \
NORMAL_EXCEPTION_PROLOG; \
mfspr r5,SPRN_ESR; /* Grab the ESR and save it */ \
stw r5,_ESR(r11); \
mfspr r4,SPRN_DEAR; /* Grab the DEAR */ \
EXC_XFER_EE_LITE(0x0300, handle_page_fault)
#define INSTRUCTION_STORAGE_EXCEPTION \ #define INSTRUCTION_STORAGE_EXCEPTION \
START_EXCEPTION(InstructionStorage) \ START_EXCEPTION(InstructionStorage) \
NORMAL_EXCEPTION_PROLOG; \ NORMAL_EXCEPTION_PROLOG; \
......
...@@ -27,6 +27,7 @@ ...@@ -27,6 +27,7 @@
#include <asm/mmu.h> #include <asm/mmu.h>
#include <asm/system.h> #include <asm/system.h>
#include <asm/page.h> #include <asm/page.h>
#include <asm/cacheflush.h>
#include "mmu_decl.h" #include "mmu_decl.h"
...@@ -37,11 +38,35 @@ unsigned int tlb_44x_index; /* = 0 */ ...@@ -37,11 +38,35 @@ unsigned int tlb_44x_index; /* = 0 */
unsigned int tlb_44x_hwater = PPC44x_TLB_SIZE - 1 - PPC44x_EARLY_TLBS; unsigned int tlb_44x_hwater = PPC44x_TLB_SIZE - 1 - PPC44x_EARLY_TLBS;
int icache_44x_need_flush; int icache_44x_need_flush;
/*
 * Re-patch the TLB high-water mark into the D and I TLB miss handlers.
 *
 * The miss handlers hard-code the watermark as the immediate operand of
 * a cmpli instruction (cheaper than loading it from the global each
 * miss), so whenever tlb_44x_hwater changes we must rewrite that
 * immediate in both handlers and flush the icache over each patched
 * instruction word.
 */
static void __init ppc44x_update_tlb_hwater(void)
{
	extern unsigned int tlb_44x_patch_hwater_D[];
	extern unsigned int tlb_44x_patch_hwater_I[];
	unsigned int *patch_sites[2];
	int i;

	patch_sites[0] = tlb_44x_patch_hwater_D;
	patch_sites[1] = tlb_44x_patch_hwater_I;

	for (i = 0; i < 2; i++) {
		unsigned int *insn = patch_sites[i];

		/* Splice the watermark into the low 16 bits (cmpli immediate) */
		insn[0] = (insn[0] & 0xffff0000) | tlb_44x_hwater;
		flush_icache_range((unsigned long)&insn[0],
				   (unsigned long)&insn[1]);
	}
}
/* /*
* "Pins" a 256MB TLB entry in AS0 for kernel lowmem * "Pins" a 256MB TLB entry in AS0 for kernel lowmem
*/ */
static void __init ppc44x_pin_tlb(unsigned int virt, unsigned int phys) static void __init ppc44x_pin_tlb(unsigned int virt, unsigned int phys)
{ {
unsigned int entry = tlb_44x_hwater--;
ppc44x_update_tlb_hwater();
__asm__ __volatile__( __asm__ __volatile__(
"tlbwe %2,%3,%4\n" "tlbwe %2,%3,%4\n"
"tlbwe %1,%3,%5\n" "tlbwe %1,%3,%5\n"
...@@ -50,7 +75,7 @@ static void __init ppc44x_pin_tlb(unsigned int virt, unsigned int phys) ...@@ -50,7 +75,7 @@ static void __init ppc44x_pin_tlb(unsigned int virt, unsigned int phys)
: "r" (PPC44x_TLB_SW | PPC44x_TLB_SR | PPC44x_TLB_SX | PPC44x_TLB_G), : "r" (PPC44x_TLB_SW | PPC44x_TLB_SR | PPC44x_TLB_SX | PPC44x_TLB_G),
"r" (phys), "r" (phys),
"r" (virt | PPC44x_TLB_VALID | PPC44x_TLB_256M), "r" (virt | PPC44x_TLB_VALID | PPC44x_TLB_256M),
"r" (tlb_44x_hwater--), /* slot for this TLB entry */ "r" (entry),
"i" (PPC44x_TLB_PAGEID), "i" (PPC44x_TLB_PAGEID),
"i" (PPC44x_TLB_XLAT), "i" (PPC44x_TLB_XLAT),
"i" (PPC44x_TLB_ATTRIB)); "i" (PPC44x_TLB_ATTRIB));
...@@ -58,6 +83,8 @@ static void __init ppc44x_pin_tlb(unsigned int virt, unsigned int phys) ...@@ -58,6 +83,8 @@ static void __init ppc44x_pin_tlb(unsigned int virt, unsigned int phys)
void __init MMU_init_hw(void) void __init MMU_init_hw(void)
{ {
ppc44x_update_tlb_hwater();
flush_instruction_cache(); flush_instruction_cache();
} }
......
...@@ -306,7 +306,8 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address, ...@@ -306,7 +306,8 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
flush_dcache_icache_page(page); flush_dcache_icache_page(page);
set_bit(PG_arch_1, &page->flags); set_bit(PG_arch_1, &page->flags);
} }
pte_update(ptep, 0, _PAGE_HWEXEC); pte_update(ptep, 0, _PAGE_HWEXEC |
_PAGE_ACCESSED);
_tlbie(address, mm->context.id); _tlbie(address, mm->context.id);
pte_unmap_unlock(ptep, ptl); pte_unmap_unlock(ptep, ptl);
up_read(&mm->mmap_sem); up_read(&mm->mmap_sem);
......
...@@ -182,6 +182,9 @@ extern int icache_44x_need_flush; ...@@ -182,6 +182,9 @@ extern int icache_44x_need_flush;
#define _PMD_SIZE_16M 0x0e0 #define _PMD_SIZE_16M 0x0e0
#define PMD_PAGE_SIZE(pmdval) (1024 << (((pmdval) & _PMD_SIZE) >> 4)) #define PMD_PAGE_SIZE(pmdval) (1024 << (((pmdval) & _PMD_SIZE) >> 4))
/* Until my rework is finished, 40x still needs atomic PTE updates */
#define PTE_ATOMIC_UPDATES 1
#elif defined(CONFIG_44x) #elif defined(CONFIG_44x)
/* /*
* Definitions for PPC440 * Definitions for PPC440
...@@ -253,17 +256,17 @@ extern int icache_44x_need_flush; ...@@ -253,17 +256,17 @@ extern int icache_44x_need_flush;
*/ */
#define _PAGE_PRESENT 0x00000001 /* S: PTE valid */ #define _PAGE_PRESENT 0x00000001 /* S: PTE valid */
#define _PAGE_RW 0x00000002 /* S: Write permission */ #define _PAGE_RW 0x00000002 /* S: Write permission */
#define _PAGE_FILE 0x00000004 /* S: nonlinear file mapping */ #define _PAGE_FILE 0x00000004 /* S: nonlinear file mapping */
#define _PAGE_HWEXEC 0x00000004 /* H: Execute permission */
#define _PAGE_ACCESSED 0x00000008 /* S: Page referenced */ #define _PAGE_ACCESSED 0x00000008 /* S: Page referenced */
#define _PAGE_HWWRITE 0x00000010 /* H: Dirty & RW */ #define _PAGE_DIRTY 0x00000010 /* S: Page dirty */
#define _PAGE_HWEXEC 0x00000020 /* H: Execute permission */ #define _PAGE_USER 0x00000040 /* S: User page */
#define _PAGE_USER 0x00000040 /* S: User page */ #define _PAGE_ENDIAN 0x00000080 /* H: E bit */
#define _PAGE_ENDIAN 0x00000080 /* H: E bit */ #define _PAGE_GUARDED 0x00000100 /* H: G bit */
#define _PAGE_GUARDED 0x00000100 /* H: G bit */ #define _PAGE_COHERENT 0x00000200 /* H: M bit */
#define _PAGE_DIRTY 0x00000200 /* S: Page dirty */ #define _PAGE_NO_CACHE 0x00000400 /* H: I bit */
#define _PAGE_NO_CACHE 0x00000400 /* H: I bit */ #define _PAGE_WRITETHRU 0x00000800 /* H: W bit */
#define _PAGE_WRITETHRU 0x00000800 /* H: W bit */
/* TODO: Add large page lowmem mapping support */ /* TODO: Add large page lowmem mapping support */
#define _PMD_PRESENT 0 #define _PMD_PRESENT 0
...@@ -273,6 +276,7 @@ extern int icache_44x_need_flush; ...@@ -273,6 +276,7 @@ extern int icache_44x_need_flush;
/* ERPN in a PTE never gets cleared, ignore it */ /* ERPN in a PTE never gets cleared, ignore it */
#define _PTE_NONE_MASK 0xffffffff00000000ULL #define _PTE_NONE_MASK 0xffffffff00000000ULL
#elif defined(CONFIG_FSL_BOOKE) #elif defined(CONFIG_FSL_BOOKE)
/* /*
MMU Assist Register 3: MMU Assist Register 3:
...@@ -315,6 +319,9 @@ extern int icache_44x_need_flush; ...@@ -315,6 +319,9 @@ extern int icache_44x_need_flush;
#define _PMD_PRESENT_MASK (PAGE_MASK) #define _PMD_PRESENT_MASK (PAGE_MASK)
#define _PMD_BAD (~PAGE_MASK) #define _PMD_BAD (~PAGE_MASK)
/* Until my rework is finished, FSL BookE still needs atomic PTE updates */
#define PTE_ATOMIC_UPDATES 1
#elif defined(CONFIG_8xx) #elif defined(CONFIG_8xx)
/* Definitions for 8xx embedded chips. */ /* Definitions for 8xx embedded chips. */
#define _PAGE_PRESENT 0x0001 /* Page is valid */ #define _PAGE_PRESENT 0x0001 /* Page is valid */
...@@ -345,6 +352,9 @@ extern int icache_44x_need_flush; ...@@ -345,6 +352,9 @@ extern int icache_44x_need_flush;
#define _PTE_NONE_MASK _PAGE_ACCESSED #define _PTE_NONE_MASK _PAGE_ACCESSED
/* Until my rework is finished, 8xx still needs atomic PTE updates */
#define PTE_ATOMIC_UPDATES 1
#else /* CONFIG_6xx */ #else /* CONFIG_6xx */
/* Definitions for 60x, 740/750, etc. */ /* Definitions for 60x, 740/750, etc. */
#define _PAGE_PRESENT 0x001 /* software: pte contains a translation */ #define _PAGE_PRESENT 0x001 /* software: pte contains a translation */
...@@ -365,6 +375,10 @@ extern int icache_44x_need_flush; ...@@ -365,6 +375,10 @@ extern int icache_44x_need_flush;
#define _PMD_PRESENT 0 #define _PMD_PRESENT 0
#define _PMD_PRESENT_MASK (PAGE_MASK) #define _PMD_PRESENT_MASK (PAGE_MASK)
#define _PMD_BAD (~PAGE_MASK) #define _PMD_BAD (~PAGE_MASK)
/* Hash table based platforms need atomic updates of the linux PTE */
#define PTE_ATOMIC_UPDATES 1
#endif #endif
/* /*
...@@ -557,9 +571,11 @@ extern void add_hash_page(unsigned context, unsigned long va, ...@@ -557,9 +571,11 @@ extern void add_hash_page(unsigned context, unsigned long va,
* low PTE word since we expect ALL flag bits to be there * low PTE word since we expect ALL flag bits to be there
*/ */
#ifndef CONFIG_PTE_64BIT #ifndef CONFIG_PTE_64BIT
static inline unsigned long pte_update(pte_t *p, unsigned long clr, static inline unsigned long pte_update(pte_t *p,
unsigned long clr,
unsigned long set) unsigned long set)
{ {
#ifdef PTE_ATOMIC_UPDATES
unsigned long old, tmp; unsigned long old, tmp;
__asm__ __volatile__("\ __asm__ __volatile__("\
...@@ -572,16 +588,26 @@ static inline unsigned long pte_update(pte_t *p, unsigned long clr, ...@@ -572,16 +588,26 @@ static inline unsigned long pte_update(pte_t *p, unsigned long clr,
: "=&r" (old), "=&r" (tmp), "=m" (*p) : "=&r" (old), "=&r" (tmp), "=m" (*p)
: "r" (p), "r" (clr), "r" (set), "m" (*p) : "r" (p), "r" (clr), "r" (set), "m" (*p)
: "cc" ); : "cc" );
#else /* PTE_ATOMIC_UPDATES */
unsigned long old = pte_val(*p);
*p = __pte((old & ~clr) | set);
#endif /* !PTE_ATOMIC_UPDATES */
#ifdef CONFIG_44x #ifdef CONFIG_44x
if ((old & _PAGE_USER) && (old & _PAGE_HWEXEC)) if ((old & _PAGE_USER) && (old & _PAGE_HWEXEC))
icache_44x_need_flush = 1; icache_44x_need_flush = 1;
#endif #endif
return old; return old;
} }
#else #else /* CONFIG_PTE_64BIT */
static inline unsigned long long pte_update(pte_t *p, unsigned long clr, /* TODO: Change that to only modify the low word and move set_pte_at()
unsigned long set) * out of line
*/
static inline unsigned long long pte_update(pte_t *p,
unsigned long clr,
unsigned long set)
{ {
#ifdef PTE_ATOMIC_UPDATES
unsigned long long old; unsigned long long old;
unsigned long tmp; unsigned long tmp;
...@@ -596,13 +622,18 @@ static inline unsigned long long pte_update(pte_t *p, unsigned long clr, ...@@ -596,13 +622,18 @@ static inline unsigned long long pte_update(pte_t *p, unsigned long clr,
: "=&r" (old), "=&r" (tmp), "=m" (*p) : "=&r" (old), "=&r" (tmp), "=m" (*p)
: "r" (p), "r" ((unsigned long)(p) + 4), "r" (clr), "r" (set), "m" (*p) : "r" (p), "r" ((unsigned long)(p) + 4), "r" (clr), "r" (set), "m" (*p)
: "cc" ); : "cc" );
#else /* PTE_ATOMIC_UPDATES */
unsigned long long old = pte_val(*p);
*p = __pte((old & ~clr) | set);
#endif /* !PTE_ATOMIC_UPDATES */
#ifdef CONFIG_44x #ifdef CONFIG_44x
if ((old & _PAGE_USER) && (old & _PAGE_HWEXEC)) if ((old & _PAGE_USER) && (old & _PAGE_HWEXEC))
icache_44x_need_flush = 1; icache_44x_need_flush = 1;
#endif #endif
return old; return old;
} }
#endif #endif /* CONFIG_PTE_64BIT */
/* /*
* set_pte stores a linux PTE into the linux page table. * set_pte stores a linux PTE into the linux page table.
...@@ -671,7 +702,7 @@ static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry, int dirty) ...@@ -671,7 +702,7 @@ static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry, int dirty)
({ \ ({ \
int __changed = !pte_same(*(__ptep), __entry); \ int __changed = !pte_same(*(__ptep), __entry); \
if (__changed) { \ if (__changed) { \
__ptep_set_access_flags(__ptep, __entry, __dirty); \ __ptep_set_access_flags(__ptep, __entry, __dirty); \
flush_tlb_page_nohash(__vma, __address); \ flush_tlb_page_nohash(__vma, __address); \
} \ } \
__changed; \ __changed; \
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment