Commit d7744a09 authored by David S. Miller, committed by David S. Miller

[SPARC64]: Create a separate kernel TSB for 4MB/256MB mappings.

The new TSB can map all of the linear kernel mappings with zero TSB
hash conflicts on systems with 16GB or less RAM.  In such cases, on
SUN4V, once we load up this TSB the first time with all the
mappings, we never take a linear kernel mapping TLB miss again; the
hypervisor handles them all.
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 9cc3a1ac
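For context, the 16GB figure in the commit message follows directly from the new table's geometry: the descriptor registered below advertises KERNEL_TSB4M_NENTRIES (4096) direct-mapped slots, and each slot covers one 4MB linear mapping, so the hypervisor can service 4096 x 4MB = 16GB of linear mappings without two regions ever hashing to the same slot. A minimal sketch of that arithmetic (illustrative C, not part of the commit):

#include <stdio.h>

#define KERNEL_TSB4M_NENTRIES	4096
#define TSB4M_SLOT_BYTES	(4UL << 20)	/* one 4MB mapping per slot */

int main(void)
{
	/* Direct-mapped table: conflict-free coverage is slots * 4MB. */
	unsigned long coverage = KERNEL_TSB4M_NENTRIES * TSB4M_SLOT_BYTES;

	printf("conflict-free linear coverage: %luGB\n",
	       coverage >> 30);	/* prints 16 */
	return 0;
}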
arch/sparc64/kernel/ktlb.S

@@ -121,6 +121,12 @@ kvmap_dtlb_obp:
 	 nop

 	.align		32
+kvmap_dtlb_tsb4m_load:
+	KTSB_LOCK_TAG(%g1, %g2, %g7)
+	KTSB_WRITE(%g1, %g5, %g6)
+	ba,pt		%xcc, kvmap_dtlb_load
+	 nop
+
 kvmap_dtlb:
 	/* %g6: TAG TARGET */
 	mov		TLB_TAG_ACCESS, %g4
@@ -133,6 +139,13 @@ kvmap_dtlb_4v:
 	brgez,pn	%g4, kvmap_dtlb_nonlinear
 	 nop

+	/* Correct TAG_TARGET is already in %g6, check 4mb TSB. */
+	KERN_TSB4M_LOOKUP_TL1(%g6, %g5, %g1, %g2, %g3, kvmap_dtlb_load)
+
+	/* TSB entry address left in %g1, lookup linear PTE.
+	 * Must preserve %g1 and %g6 (TAG).
+	 */
+kvmap_dtlb_tsb4m_miss:
 	sethi		%hi(kpte_linear_bitmap), %g2
 	or		%g2, %lo(kpte_linear_bitmap), %g2
@@ -163,7 +176,7 @@ kvmap_dtlb_4v:
 	.globl		kvmap_linear_patch
 kvmap_linear_patch:
-	ba,pt		%xcc, kvmap_dtlb_load
+	ba,pt		%xcc, kvmap_dtlb_tsb4m_load
 	 xor		%g2, %g4, %g5

 kvmap_dtlb_vmalloc_addr:
...
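The control flow above is easier to follow in C. On a DTLB miss against a linear kernel address, the handler first probes the new 4MB TSB; only on a TSB miss does it fall through to kvmap_dtlb_tsb4m_miss to build the PTE from kpte_linear_bitmap, and kvmap_dtlb_tsb4m_load then fills the TSB entry before loading the TLB, so the next miss on that region hits in the TSB. A rough sketch, with compute_linear_pte() and tlb_load() as hypothetical stand-ins for the bitmap walk and kvmap_dtlb_load:

/* Sketch of the reworked kvmap_dtlb linear path; illustrative C, not
 * the kernel's code.  The TSB layout (16-byte tag + TTE entries) and
 * the index mask mirror KERN_TSB4M_LOOKUP_TL1.
 */
struct tsb_entry { unsigned long tag, pte; };

#define KERNEL_TSB_NENTRIES	((32 * 1024) / 16)

extern struct tsb_entry swapper_4m_tsb[];
extern unsigned long compute_linear_pte(unsigned long vaddr);	/* kpte_linear_bitmap walk */
extern void tlb_load(unsigned long pte);			/* kvmap_dtlb_load */

void linear_dtlb_miss(unsigned long vaddr)
{
	/* TAG TARGET is already vaddr >> 22 (one tag per 4MB region). */
	unsigned long tag = vaddr >> 22;
	struct tsb_entry *ent = &swapper_4m_tsb[tag & (KERNEL_TSB_NENTRIES - 1)];

	if (ent->tag == tag) {			/* KERN_TSB4M_LOOKUP_TL1 hit */
		tlb_load(ent->pte);
		return;
	}

	/* kvmap_dtlb_tsb4m_miss: derive the PTE for this 4MB region. */
	unsigned long pte = compute_linear_pte(vaddr);

	/* kvmap_dtlb_tsb4m_load: fill the TSB, then load the TLB. */
	ent->tag = tag;
	ent->pte = pte;
	tlb_load(pte);
}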
arch/sparc64/mm/init.c

@@ -58,6 +58,9 @@ unsigned long kern_linear_pte_xor[2] __read_mostly;
  */
 unsigned long kpte_linear_bitmap[KPTE_BITMAP_BYTES / sizeof(unsigned long)];

+/* A special kernel TSB for 4MB and 256MB linear mappings. */
+struct tsb swapper_4m_tsb[KERNEL_TSB4M_NENTRIES];
+
 #define MAX_BANKS	32

 static struct linux_prom64_registers pavail[MAX_BANKS] __initdata;
@@ -1086,6 +1089,7 @@ static void __init sun4v_ktsb_init(void)
 {
 	unsigned long ktsb_pa;

+	/* First KTSB for PAGE_SIZE mappings. */
 	ktsb_pa = kern_base + ((unsigned long)&swapper_tsb[0] - KERNBASE);

 	switch (PAGE_SIZE) {
@@ -1117,9 +1121,18 @@ static void __init sun4v_ktsb_init(void)
 	ktsb_descr[0].tsb_base = ktsb_pa;
 	ktsb_descr[0].resv = 0;

-	/* XXX When we have a kernel large page size TSB, describe
-	 * XXX it in ktsb_descr[1] here.
-	 */
+	/* Second KTSB for 4MB/256MB mappings. */
+	ktsb_pa = (kern_base +
+		   ((unsigned long)&swapper_4m_tsb[0] - KERNBASE));
+
+	ktsb_descr[1].pgsz_idx = HV_PGSZ_IDX_4MB;
+	ktsb_descr[1].pgsz_mask = (HV_PGSZ_MASK_4MB |
+				   HV_PGSZ_MASK_256MB);
+	ktsb_descr[1].assoc = 1;
+	ktsb_descr[1].num_ttes = KERNEL_TSB4M_NENTRIES;
+	ktsb_descr[1].ctx_idx = 0;
+	ktsb_descr[1].tsb_base = ktsb_pa;
+	ktsb_descr[1].resv = 0;
 }

 void __cpuinit sun4v_ktsb_register(void)
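Each ktsb_descr[] entry is a sun4v TSB descriptor: it tells the hypervisor where a TSB lives in physical memory, how many entries it holds, and which page sizes it may contain, which is what lets the hypervisor walk the table itself on a TLB miss. For reference, the layout looks roughly like this (field names as used in the diff; the exact struct lives in the sun4v hypervisor API header):

/* Sun4v TSB descriptor, as consumed by HV_FAST_MMU_TSB_CTX0.
 * ktsb_descr[1] above describes the 4MB/256MB table: indexed by
 * 4MB (pgsz_idx), direct mapped (assoc = 1), 4096 entries, and
 * permitted to hold both 4MB and 256MB TTEs (pgsz_mask).
 */
struct hv_tsb_descr {
	unsigned short	pgsz_idx;	/* page size used to index the TSB */
	unsigned short	assoc;		/* associativity; 1 = direct mapped */
	unsigned int	num_ttes;	/* number of entries in the table */
	unsigned int	ctx_idx;	/* context index; 0 for the kernel */
	unsigned int	pgsz_mask;	/* all page sizes the TSB may hold */
	unsigned long	tsb_base;	/* physical address of the table */
	unsigned long	resv;
};

Note that pgsz_idx fixes the page size used for index computation while pgsz_mask lists every size the table may contain, which is how a single table serves both 4MB and 256MB linear mappings.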
@@ -1132,8 +1145,7 @@ void __cpuinit sun4v_ktsb_register(void)
 	pa = kern_base + ((unsigned long)&ktsb_descr[0] - KERNBASE);

 	func = HV_FAST_MMU_TSB_CTX0;
-	/* XXX set arg0 to 2 when we use ktsb_descr[1], see above XXX */
-	arg0 = 1;
+	arg0 = 2;
 	arg1 = pa;
 	__asm__ __volatile__("ta %6"
 			     : "=&r" (func), "=&r" (arg0), "=&r" (arg1)
@@ -1160,7 +1172,9 @@ void __init paging_init(void)
 	kern_base = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
 	kern_size = (unsigned long)&_end - (unsigned long)KERNBASE;

+	/* Invalidate both kernel TSBs. */
 	memset(swapper_tsb, 0x40, sizeof(swapper_tsb));
+	memset(swapper_4m_tsb, 0x40, sizeof(swapper_4m_tsb));

 	if (tlb_type == hypervisor)
 		sun4v_pgprot_init();
...
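One subtlety in paging_init(): the TSBs are "cleared" with 0x40 rather than zeroed. Filling every byte with 0x40 sets bit 46 in each 8-byte tag word, which (per the tag format in tsb.h) is the tag's invalid bit, so freshly initialized entries can never match a lookup. A tiny self-check of the bit pattern (illustrative only):

#include <assert.h>
#include <string.h>

#define TSB_TAG_INVALID_BIT	46	/* TSB tag invalid bit, per tsb.h */

int main(void)
{
	unsigned long tag;

	/* memset(tsb, 0x40, size) makes every tag 0x4040404040404040;
	 * bit 46 lies in the 0x40 byte covering bits 40-47, so the
	 * invalid bit is set in every entry.
	 */
	memset(&tag, 0x40, sizeof(tag));
	assert(tag & (1UL << TSB_TAG_INVALID_BIT));
	return 0;
}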
include/asm-sparc64/tsb.h

@@ -243,6 +243,7 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end;
 #define KERNEL_TSB_SIZE_BYTES	(32 * 1024)
 #define KERNEL_TSB_NENTRIES	\
 	(KERNEL_TSB_SIZE_BYTES / 16)
+#define KERNEL_TSB4M_NENTRIES	4096

 /* Do a kernel TSB lookup at tl>0 on VADDR+TAG, branch to OK_LABEL
  * on TSB hit. REG1, REG2, REG3, and REG4 are used as temporaries
@@ -263,4 +264,18 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end;
 	be,a,pt	%xcc, OK_LABEL; \
 	 mov	REG4, REG1;

+/* This version uses a trick, the TAG is already (VADDR >> 22) so
+ * we can make use of that for the index computation.
+ */
+#define KERN_TSB4M_LOOKUP_TL1(TAG, REG1, REG2, REG3, REG4, OK_LABEL) \
+	sethi		%hi(swapper_4m_tsb), REG1; \
+	or		REG1, %lo(swapper_4m_tsb), REG1; \
+	and		TAG, (KERNEL_TSB_NENTRIES - 1), REG2; \
+	sllx		REG2, 4, REG2; \
+	add		REG1, REG2, REG2; \
+	KTSB_LOAD_QUAD(REG2, REG3); \
+	cmp		REG3, TAG; \
+	be,a,pt	%xcc, OK_LABEL; \
+	 mov	REG4, REG1;
+
 #endif /* !(_SPARC64_TSB_H) */
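The "trick" the new macro's comment refers to: for kernel addresses the TSB tag target is already (vaddr >> 22), one distinct value per 4MB region, so the macro can reuse the tag as the hash index instead of re-deriving it from the virtual address, saving an extra shift at trap level 1. The index computation reduces to the following (constants mirror the header; illustrative C):

/* Entry offset computed by KERN_TSB4M_LOOKUP_TL1, expressed in C. */
#define KERNEL_TSB_NENTRIES	((32 * 1024) / 16)

static unsigned long tsb4m_entry_offset(unsigned long tag)
{
	/* and	TAG, (KERNEL_TSB_NENTRIES - 1), REG2 */
	unsigned long idx = tag & (KERNEL_TSB_NENTRIES - 1);

	/* sllx	REG2, 4, REG2 -- each TSB entry is 16 bytes (tag + TTE) */
	return idx << 4;
}

KTSB_LOAD_QUAD then pulls in the 16-byte entry (tag plus TTE) in one go; on a hit the annulled branch copies the TTE into REG1 (%g5 at the call site) and jumps to kvmap_dtlb_load, while on a miss it falls through with the entry address still in %g1, which is exactly the invariant the kvmap_dtlb_tsb4m_miss comment in ktlb.S requires so kvmap_dtlb_tsb4m_load can write the new entry back later.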