powerpc: Add TLB management code for 64-bit Book3E

This adds the TLB miss handler assembly and the low level TLB flush routines,
along with the necessary hook for dealing with our virtual page tables
or indirect TLB entries that need to be flushed when PTE pages are freed.

There is currently no support for hugetlbfs.
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
parent a8f7758c
@@ -61,4 +61,7 @@ typedef struct {
#endif /* !__ASSEMBLY__ */
#define mmu_virtual_psize MMU_PAGE_4K
#define mmu_linear_psize MMU_PAGE_256M
#endif /* _ASM_POWERPC_MMU_40X_H_ */
@@ -79,16 +79,22 @@ typedef struct {
#if (PAGE_SHIFT == 12)
#define PPC44x_TLBE_SIZE PPC44x_TLB_4K
#define mmu_virtual_psize MMU_PAGE_4K
#elif (PAGE_SHIFT == 14)
#define PPC44x_TLBE_SIZE PPC44x_TLB_16K
#define mmu_virtual_psize MMU_PAGE_16K
#elif (PAGE_SHIFT == 16)
#define PPC44x_TLBE_SIZE PPC44x_TLB_64K
#define mmu_virtual_psize MMU_PAGE_64K
#elif (PAGE_SHIFT == 18)
#define PPC44x_TLBE_SIZE PPC44x_TLB_256K
#define mmu_virtual_psize MMU_PAGE_256K
#else
#error "Unsupported PAGE_SIZE"
#endif
#define mmu_linear_psize MMU_PAGE_256M
#define PPC44x_PGD_OFF_SHIFT (32 - PGDIR_SHIFT + PGD_T_LOG2)
#define PPC44x_PGD_OFF_MASK_BIT (PGDIR_SHIFT - PGD_T_LOG2)
#define PPC44x_PTE_ADD_SHIFT (32 - PGDIR_SHIFT + PTE_SHIFT + PTE_T_LOG2)
......
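The three PPC44x_* constants added above are shift/mask parameters, presumably consumed by the 44x TLB miss assembly to turn a faulting address into byte offsets into the PGD and the PTE page using single rotate-and-mask instructions. The standalone sketch below simply evaluates the same formulas under assumed inputs (4K pages, 4-byte pgd_t and pte_t); real configurations may differ (for example 64-bit PTEs on 440 with 36-bit physical addressing).

/* Evaluate the PPC44x_* offset macros from the hunk above under assumed
 * values; only the formulas come from the patch, the inputs are guesses.
 */
#include <stdio.h>

#define PAGE_SHIFT	12			/* assumed 4K pages */
#define PTE_SHIFT	(PAGE_SHIFT - 2)	/* assumed 4-byte PTEs: 1024 per page */
#define PGDIR_SHIFT	(PAGE_SHIFT + PTE_SHIFT)
#define PGD_T_LOG2	2			/* assumed 4-byte pgd_t */
#define PTE_T_LOG2	2			/* assumed 4-byte pte_t */

/* Same formulas as the hunk above */
#define PPC44x_PGD_OFF_SHIFT	(32 - PGDIR_SHIFT + PGD_T_LOG2)
#define PPC44x_PGD_OFF_MASK_BIT	(PGDIR_SHIFT - PGD_T_LOG2)
#define PPC44x_PTE_ADD_SHIFT	(32 - PGDIR_SHIFT + PTE_SHIFT + PTE_T_LOG2)

int main(void)
{
	printf("PGD_OFF_SHIFT=%d PGD_OFF_MASK_BIT=%d PTE_ADD_SHIFT=%d\n",
	       PPC44x_PGD_OFF_SHIFT, PPC44x_PGD_OFF_MASK_BIT, PPC44x_PTE_ADD_SHIFT);
	return 0;
}

Under these assumptions the values come out to 12, 20 and 22, ready-made operands for rlwinm-style address decoding.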
@@ -143,4 +143,7 @@ typedef struct {
} mm_context_t;
#endif /* !__ASSEMBLY__ */
#define mmu_virtual_psize MMU_PAGE_4K
#define mmu_linear_psize MMU_PAGE_8M
#endif /* _ASM_POWERPC_MMU_8XX_H_ */
@@ -80,4 +80,10 @@ typedef struct {
#endif /* !__ASSEMBLY__ */
/* We happily ignore the smaller BATs on 601; we don't actually use
* those definitions on hash32 at the moment anyway
*/
#define mmu_virtual_psize MMU_PAGE_4K
#define mmu_linear_psize MMU_PAGE_256M
#endif /* _ASM_POWERPC_MMU_HASH32_H_ */
@@ -43,6 +43,10 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
tsk->thread.pgdir = next->pgd;
#endif /* CONFIG_PPC32 */
/* 64-bit Book3E keeps track of current PGD in the PACA */
#ifdef CONFIG_PPC_BOOK3E_64
get_paca()->pgd = next->pgd;
#endif
/* Nothing else to do if we aren't actually switching */
if (prev == next)
return;
@@ -89,6 +93,10 @@ static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
static inline void enter_lazy_tlb(struct mm_struct *mm,
struct task_struct *tsk)
{
/* 64-bit Book3E keeps track of current PGD in the PACA */
#ifdef CONFIG_PPC_BOOK3E_64
get_paca()->pgd = NULL;
#endif
}
#endif /* __KERNEL__ */
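The two mmu_context.h hunks above keep the PACA's pgd pointer in sync with the active mm, presumably so the 64-bit Book3E TLB miss code can find the current page tables from the per-CPU area without dereferencing the task's mm. A toy user-space model of that bookkeeping, using hypothetical names rather than kernel API, looks like this:

/* Toy model of the PACA pgd tracking: the switch path publishes the
 * incoming mm's PGD in the per-CPU area, and the lazy-TLB path clears it
 * so a kernel thread never walks a stale user PGD.
 */
#include <stddef.h>
#include <stdio.h>

struct my_mm   { unsigned long *pgd; };
struct my_paca { unsigned long *pgd; };	/* per-CPU, read by the TLB miss code */

static struct my_paca paca;

static void my_switch_mm(struct my_mm *prev, struct my_mm *next)
{
	paca.pgd = next->pgd;		/* mirrors: get_paca()->pgd = next->pgd */
	if (prev == next)
		return;			/* nothing else to do if not actually switching */
	/* ... flush/context handling would follow here ... */
}

static void my_enter_lazy_tlb(struct my_mm *mm)
{
	(void)mm;
	paca.pgd = NULL;		/* mirrors: get_paca()->pgd = NULL */
}

int main(void)
{
	unsigned long pgd_page[4] = { 0 };
	struct my_mm next = { .pgd = pgd_page };

	my_switch_mm(NULL, &next);
	printf("pgd published to paca: %p\n", (void *)paca.pgd);
	my_enter_lazy_tlb(&next);
	printf("pgd cleared for lazy tlb: %p\n", (void *)paca.pgd);
	return 0;
}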
......
@@ -62,6 +62,7 @@
#include <asm/udbg.h>
#include <asm/kexec.h>
#include <asm/swiotlb.h>
#include <asm/mmu_context.h>
#include "setup.h"
@@ -147,6 +148,9 @@ void __init setup_paca(int cpu)
{
local_paca = &paca[cpu];
mtspr(SPRN_SPRG_PACA, local_paca);
#ifdef CONFIG_PPC_BOOK3E
mtspr(SPRN_SPRG_TLB_EXFRAME, local_paca->extlb);
#endif
}
/*
......
@@ -41,7 +41,11 @@ static inline void _tlbil_pid(unsigned int pid)
#else /* CONFIG_40x || CONFIG_8xx */
extern void _tlbil_all(void);
extern void _tlbil_pid(unsigned int pid);
#ifdef CONFIG_PPC_BOOK3E
extern void _tlbil_pid_noind(unsigned int pid);
#else
#define _tlbil_pid_noind(pid) _tlbil_pid(pid)
#endif
#endif /* !(CONFIG_40x || CONFIG_8xx) */
/*
@@ -53,7 +57,10 @@ static inline void _tlbil_va(unsigned long address, unsigned int pid,
{
asm volatile ("tlbie %0; sync" : : "r" (address) : "memory");
}
#else /* CONFIG_8xx */
#elif defined(CONFIG_PPC_BOOK3E)
extern void _tlbil_va(unsigned long address, unsigned int pid,
unsigned int tsize, unsigned int ind);
#else
extern void __tlbil_va(unsigned long address, unsigned int pid);
static inline void _tlbil_va(unsigned long address, unsigned int pid,
unsigned int tsize, unsigned int ind)
@@ -67,11 +74,16 @@ static inline void _tlbil_va(unsigned long address, unsigned int pid,
* implementation. When that becomes the case, this will be
* an extern.
*/
#ifdef CONFIG_PPC_BOOK3E
extern void _tlbivax_bcast(unsigned long address, unsigned int pid,
unsigned int tsize, unsigned int ind);
#else
static inline void _tlbivax_bcast(unsigned long address, unsigned int pid,
unsigned int tsize, unsigned int ind)
{
BUG();
}
#endif
#else /* CONFIG_PPC_MMU_NOHASH */
......
This diff is collapsed.
@@ -7,8 +7,8 @@
*
* -- BenH
*
* Copyright 2008 Ben Herrenschmidt <benh@kernel.crashing.org>
* IBM Corp.
* Copyright 2008,2009 Ben Herrenschmidt <benh@kernel.crashing.org>
* IBM Corp.
*
* Derived from arch/ppc/mm/init.c:
* Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
@@ -34,12 +34,70 @@
#include <linux/pagemap.h>
#include <linux/preempt.h>
#include <linux/spinlock.h>
#include <linux/lmb.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/code-patching.h>
#include "mmu_decl.h"
#ifdef CONFIG_PPC_BOOK3E
struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = {
[MMU_PAGE_4K] = {
.shift = 12,
.enc = BOOK3E_PAGESZ_4K,
},
[MMU_PAGE_16K] = {
.shift = 14,
.enc = BOOK3E_PAGESZ_16K,
},
[MMU_PAGE_64K] = {
.shift = 16,
.enc = BOOK3E_PAGESZ_64K,
},
[MMU_PAGE_1M] = {
.shift = 20,
.enc = BOOK3E_PAGESZ_1M,
},
[MMU_PAGE_16M] = {
.shift = 24,
.enc = BOOK3E_PAGESZ_16M,
},
[MMU_PAGE_256M] = {
.shift = 28,
.enc = BOOK3E_PAGESZ_256M,
},
[MMU_PAGE_1G] = {
.shift = 30,
.enc = BOOK3E_PAGESZ_1GB,
},
};
static inline int mmu_get_tsize(int psize)
{
return mmu_psize_defs[psize].enc;
}
#else
static inline int mmu_get_tsize(int psize)
{
/* This isn't used on !Book3E for now */
return 0;
}
#endif
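mmu_get_tsize() simply returns the hardware TSIZE encoding recorded in mmu_psize_defs[] for a Linux page-size index. A minimal standalone sketch of the same lookup follows; the encoding used here (TSIZE = log2(page size) - 10, so 4K maps to 2 and 1G to 20) is an assumption matching the BOOK3E_PAGESZ_* values, not something shown in this hunk.

#include <stdio.h>

struct psize_def { int shift; int enc; };

enum { MY_PAGE_4K, MY_PAGE_64K, MY_PAGE_1G, MY_PAGE_COUNT };	/* hypothetical indices */

static const struct psize_def psize_defs[MY_PAGE_COUNT] = {
	[MY_PAGE_4K]  = { .shift = 12, .enc = 12 - 10 },	/* assumed BOOK3E_PAGESZ_4K  */
	[MY_PAGE_64K] = { .shift = 16, .enc = 16 - 10 },	/* assumed BOOK3E_PAGESZ_64K */
	[MY_PAGE_1G]  = { .shift = 30, .enc = 30 - 10 },	/* assumed BOOK3E_PAGESZ_1GB */
};

static int my_get_tsize(int psize)
{
	return psize_defs[psize].enc;	/* same shape as mmu_get_tsize() above */
}

int main(void)
{
	printf("4K pages -> TSIZE %d, 1G linear mapping -> TSIZE %d\n",
	       my_get_tsize(MY_PAGE_4K), my_get_tsize(MY_PAGE_1G));
	return 0;
}

This is why the flush_tlb_page() hunks further down can pass mmu_get_tsize(mmu_virtual_psize) instead of the old hard-coded 0.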
/* The variables below are currently only used on 64-bit Book3E
* though this will probably be made common with other nohash
* implementations at some point
*/
#ifdef CONFIG_PPC64
int mmu_linear_psize; /* Page size used for the linear mapping */
int mmu_pte_psize; /* Page size used for PTE pages */
int book3e_htw_enabled; /* Is HW tablewalk enabled ? */
unsigned long linear_map_top; /* Top of linear mapping */
#endif /* CONFIG_PPC64 */
/*
* Base TLB flushing operations:
*
@@ -82,7 +140,7 @@ void __local_flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
__local_flush_tlb_page(vma ? vma->vm_mm : NULL, vmaddr,
0 /* tsize unused for now */, 0);
mmu_get_tsize(mmu_virtual_psize), 0);
}
EXPORT_SYMBOL(local_flush_tlb_page);
@@ -198,7 +256,7 @@ void __flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
__flush_tlb_page(vma ? vma->vm_mm : NULL, vmaddr,
0 /* tsize unused for now */, 0);
mmu_get_tsize(mmu_virtual_psize), 0);
}
EXPORT_SYMBOL(flush_tlb_page);
@@ -241,3 +299,140 @@ void tlb_flush(struct mmu_gather *tlb)
/* Push out batch of freed page tables */
pte_free_finish();
}
/*
* Below are functions specific to the 64-bit variant of Book3E though that
* may change in the future
*/
#ifdef CONFIG_PPC64
/*
* Handling of virtual linear page tables or indirect TLB entries
* flushing when PTE pages are freed
*/
void tlb_flush_pgtable(struct mmu_gather *tlb, unsigned long address)
{
int tsize = mmu_psize_defs[mmu_pte_psize].enc;
if (book3e_htw_enabled) {
unsigned long start = address & PMD_MASK;
unsigned long end = address + PMD_SIZE;
unsigned long size = 1UL << mmu_psize_defs[mmu_pte_psize].shift;
/* This isn't the most optimal; ideally we would factor out the
* whole preempt & CPU mask mucking around, or even the IPI, but
* it will do for now
*/
while (start < end) {
__flush_tlb_page(tlb->mm, start, tsize, 1);
start += size;
}
} else {
unsigned long rmask = 0xf000000000000000ul;
unsigned long rid = (address & rmask) | 0x1000000000000000ul;
unsigned long vpte = address & ~rmask;
#ifdef CONFIG_PPC_64K_PAGES
vpte = (vpte >> (PAGE_SHIFT - 4)) & ~0xfffful;
#else
vpte = (vpte >> (PAGE_SHIFT - 3)) & ~0xffful;
#endif
vpte |= rid;
__flush_tlb_page(tlb->mm, vpte, tsize, 0);
}
}
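In the non-tablewalk branch above, PTE pages are reached through a virtual linear page table living in a separate region of the address space, so freeing a PTE page means shooting down the one virtual page of that table that maps it. Below is a standalone sketch of the same address arithmetic, assuming 4K pages and 8-byte PTEs (hence the >> (PAGE_SHIFT - 3)); the example addresses are arbitrary.

#include <stdio.h>

#define PAGE_SHIFT 12	/* assumed 4K pages */

/* Return the page of the virtual linear page table that maps 'address',
 * mirroring the non-HTW branch of tlb_flush_pgtable() above: keep the top
 * region nibble, OR in region 1, scale the rest down by 512 (8-byte PTEs
 * each mapping 4K), then align to a 4K table page.
 */
static unsigned long long vpte_page_of(unsigned long long address)
{
	unsigned long long rmask = 0xf000000000000000ull;
	unsigned long long rid   = (address & rmask) | 0x1000000000000000ull;
	unsigned long long vpte  = address & ~rmask;

	vpte = (vpte >> (PAGE_SHIFT - 3)) & ~0xffful;
	return vpte | rid;
}

int main(void)
{
	/* One 4K PTE page maps 512 * 4K = 2M, so two addresses in the same
	 * 2M-aligned block resolve to the same virtual PTE page.
	 */
	printf("0x%llx\n", vpte_page_of(0x0000000010000000ull));
	printf("0x%llx\n", vpte_page_of(0x00000000101ff000ull));
	return 0;
}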
/*
* Early initialization of the MMU TLB code
*/
static void __early_init_mmu(int boot_cpu)
{
extern unsigned int interrupt_base_book3e;
extern unsigned int exc_data_tlb_miss_htw_book3e;
extern unsigned int exc_instruction_tlb_miss_htw_book3e;
unsigned int *ibase = &interrupt_base_book3e;
unsigned int mas4;
/* XXX This will have to be decided at runtime, but right
* now our boot and TLB miss code hard wires it
*/
mmu_linear_psize = MMU_PAGE_1G;
/* Check if HW tablewalk is present, and if yes, enable it by:
*
* - patching the TLB miss handlers to branch to the
* one dedicated to it
*
* - setting the global book3e_htw_enabled
*
* - setting MAS4:INDD and the default page size
*/
/* XXX This code only checks for TLB 0 capabilities and doesn't
* check what page size combos are supported by the HW. It
* also doesn't handle the case where a separate array holds
* the IND entries from the array loaded by the PT.
*/
if (boot_cpu) {
unsigned int tlb0cfg = mfspr(SPRN_TLB0CFG);
/* Check if HW loader is supported */
if ((tlb0cfg & TLBnCFG_IND) &&
(tlb0cfg & TLBnCFG_PT)) {
patch_branch(ibase + (0x1c0 / 4),
(unsigned long)&exc_data_tlb_miss_htw_book3e, 0);
patch_branch(ibase + (0x1e0 / 4),
(unsigned long)&exc_instruction_tlb_miss_htw_book3e, 0);
book3e_htw_enabled = 1;
}
pr_info("MMU: Book3E Page Tables %s\n",
book3e_htw_enabled ? "Enabled" : "Disabled");
}
/* Set MAS4 based on page table setting */
mas4 = 0x4 << MAS4_WIMGED_SHIFT;
if (book3e_htw_enabled) {
mas4 |= MAS4_INDD;
#ifdef CONFIG_PPC_64K_PAGES
mas4 |= BOOK3E_PAGESZ_256M << MAS4_TSIZED_SHIFT;
mmu_pte_psize = MMU_PAGE_256M;
#else
mas4 |= BOOK3E_PAGESZ_1M << MAS4_TSIZED_SHIFT;
mmu_pte_psize = MMU_PAGE_1M;
#endif
} else {
#ifdef CONFIG_PPC_64K_PAGES
mas4 |= BOOK3E_PAGESZ_64K << MAS4_TSIZED_SHIFT;
#else
mas4 |= BOOK3E_PAGESZ_4K << MAS4_TSIZED_SHIFT;
#endif
mmu_pte_psize = mmu_virtual_psize;
}
mtspr(SPRN_MAS4, mas4);
/* Set the global containing the top of the linear mapping
* for use by the TLB miss code
*/
linear_map_top = lmb_end_of_DRAM();
/* A sync won't hurt us after mucking around with
* the MMU configuration
*/
mb();
}
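A worked example of the numbers behind the MAS4_TSIZED defaults chosen above for the hardware-tablewalk case: with 8-byte PTEs, one PTE page covers 2M of address space on a 4K-page kernel and 512M on a 64K-page kernel. Assuming the hardware only accepts power-of-4 sizes for indirect entries, rounding down gives the 1M and 256M defaults used in the code; that constraint is an inference, not something stated in the patch.

#include <stdio.h>

/* Address space covered by one page full of 8-byte PTEs */
static unsigned long long pte_page_coverage(unsigned int page_shift)
{
	unsigned long long ptes_per_page = (1ULL << page_shift) / 8;
	return ptes_per_page << page_shift;
}

int main(void)
{
	printf("4K pages:  one PTE page maps %llu MB\n", pte_page_coverage(12) >> 20);
	printf("64K pages: one PTE page maps %llu MB\n", pte_page_coverage(16) >> 20);
	return 0;
}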
void __init early_init_mmu(void)
{
__early_init_mmu(1);
}
void __cpuinit early_init_mmu_secondary(void)
{
__early_init_mmu(0);
}
#endif /* CONFIG_PPC64 */
@@ -191,6 +191,85 @@ ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_USE_TLBILX)
isync
1: wrtee r10
blr
#elif defined(CONFIG_PPC_BOOK3E)
/*
* New Book3E (>= 2.06) implementation
*
* Note: We may be able to get away without the interrupt masking stuff
* if we save/restore MAS6 on exceptions that might modify it
*/
_GLOBAL(_tlbil_pid)
slwi r4,r3,MAS6_SPID_SHIFT
mfmsr r10
wrteei 0
mtspr SPRN_MAS6,r4
PPC_TLBILX_PID(0,0)
wrtee r10
msync
isync
blr
_GLOBAL(_tlbil_pid_noind)
slwi r4,r3,MAS6_SPID_SHIFT
mfmsr r10
ori r4,r4,MAS6_SIND
wrteei 0
mtspr SPRN_MAS6,r4
PPC_TLBILX_PID(0,0)
wrtee r10
msync
isync
blr
_GLOBAL(_tlbil_all)
PPC_TLBILX_ALL(0,0)
msync
isync
blr
_GLOBAL(_tlbil_va)
mfmsr r10
wrteei 0
cmpwi cr0,r6,0
slwi r4,r4,MAS6_SPID_SHIFT
rlwimi r4,r5,MAS6_ISIZE_SHIFT,MAS6_ISIZE_MASK
beq 1f
rlwimi r4,r6,MAS6_SIND_SHIFT,MAS6_SIND
1: mtspr SPRN_MAS6,r4 /* assume AS=0 for now */
PPC_TLBILX_VA(0,r3)
msync
isync
wrtee r10
blr
_GLOBAL(_tlbivax_bcast)
mfmsr r10
wrteei 0
cmpwi cr0,r6,0
slwi r4,r4,MAS6_SPID_SHIFT
rlwimi r4,r5,MAS6_ISIZE_SHIFT,MAS6_ISIZE_MASK
beq 1f
rlwimi r4,r6,MAS6_SIND_SHIFT,MAS6_SIND
1: mtspr SPRN_MAS6,r4 /* assume AS=0 for now */
PPC_TLBIVAX(0,r3)
eieio
tlbsync
sync
wrtee r10
blr
_GLOBAL(set_context)
#ifdef CONFIG_BDI_SWITCH
/* Context switch the PTE pointer for the Abatron BDI2000.
* The PGDIR is the second parameter.
*/
lis r5, abatron_pteptrs@h
ori r5, r5, abatron_pteptrs@l
stw r4, 0x4(r5)
#endif
mtspr SPRN_PID,r3
isync /* Force context change */
blr
#else
#error Unsupported processor type !
#endif
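All of the Book3E invalidation entry points above describe what to invalidate through MAS6 (search PID, search page size, and the indirect bit) before issuing tlbilx/tlbivax, which is why interrupts are masked around the sequence. The C model below mirrors how _tlbil_va packs those fields; the field positions are assumptions for illustration only, the authoritative values living in asm/mmu-book3e.h.

#include <stdint.h>
#include <stdio.h>

#define MY_MAS6_SPID_SHIFT	16	/* assumed: search PID field position */
#define MY_MAS6_ISIZE_SHIFT	7	/* assumed: search page-size field position */
#define MY_MAS6_SIND		2	/* assumed: "search indirect entries" bit */

static uint32_t pack_mas6(unsigned int pid, unsigned int tsize, unsigned int ind)
{
	uint32_t mas6 = (uint32_t)pid << MY_MAS6_SPID_SHIFT;	/* slwi   r4,r4,MAS6_SPID_SHIFT  */
	mas6 |= (uint32_t)tsize << MY_MAS6_ISIZE_SHIFT;		/* rlwimi r4,r5,MAS6_ISIZE_SHIFT */
	if (ind)						/* beq 1f                        */
		mas6 |= MY_MAS6_SIND;				/* rlwimi r4,r6,...,MAS6_SIND    */
	return mas6;						/* mtspr SPRN_MAS6,r4 (AS = 0)   */
}

int main(void)
{
	/* e.g. PID 5, a 4K entry (assumed TSIZE 2), direct (non-indirect) entry */
	printf("MAS6 = 0x%08x\n", pack_mas6(5, 2, 0));
	return 0;
}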