Commit 584f8b71 authored by Michael Neuling, committed by Paul Mackerras

[POWERPC] Use SLB size from the device tree

Currently we hardwire the number of SLBs to 64, but PAPR says we
should use the ibm,slb-size property to obtain the number of SLB
entries.  Use this property instead of assuming 64; if the property
is not present, fall back to 64 entries as before.

This soft-patches the SLB handler, so it shouldn't change performance
at all.
Signed-off-by: Michael Neuling <mikey@neuling.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
parent 44ef3390
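
For context on the "soft patch": once the device tree has been scanned, the 16-bit immediate of the cmpldi at the new slb_compare_rr_to_size label is rewritten in place, so the SLB miss handler compares its round-robin index against the real SLB size rather than a compile-time constant. Below is a minimal sketch of such a helper, assuming the target instruction is assembled with a zero immediate and that the icache must be flushed afterwards; the hunks further down call the existing patch_slb_encoding(), and this body is illustrative rather than the exact kernel code.

#include <asm/cacheflush.h>

/* Illustrative sketch: OR a 16-bit value into the immediate field of an
 * instruction that was assembled with a zero immediate (e.g. the
 * "cmpldi r10,0" at slb_compare_rr_to_size), then flush the icache so
 * the patched SLB miss handler is what actually runs. */
static inline void patch_slb_encoding(unsigned int *insn_addr,
                                      unsigned int immed)
{
        *insn_addr |= immed;
        flush_icache_range((unsigned long)insn_addr,
                           (unsigned long)insn_addr + 4);
}

slb_initialize() then simply passes mmu_slb_size (filled in from ibm,slb-size, or left at its default of 64) as the immediate, as the slb_initialize() hunk below shows.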
@@ -583,6 +583,20 @@ static void __init check_cpu_pa_features(unsigned long node)
                      ibm_pa_features, ARRAY_SIZE(ibm_pa_features));
}

#ifdef CONFIG_PPC64
static void __init check_cpu_slb_size(unsigned long node)
{
        u32 *slb_size_ptr;

        slb_size_ptr = of_get_flat_dt_prop(node, "ibm,slb-size", NULL);
        if (slb_size_ptr != NULL) {
                mmu_slb_size = *slb_size_ptr;
        }
}
#else
#define check_cpu_slb_size(node) do { } while(0)
#endif

static struct feature_property {
        const char *name;
        u32 min_value;
@@ -713,6 +727,7 @@ static int __init early_init_dt_scan_cpus(unsigned long node,
        check_cpu_feature_properties(node);
        check_cpu_pa_features(node);
        check_cpu_slb_size(node);

#ifdef CONFIG_PPC_PSERIES
        if (nthreads > 1)
......
@@ -96,6 +96,7 @@ int mmu_vmalloc_psize = MMU_PAGE_4K;
int mmu_io_psize = MMU_PAGE_4K;
int mmu_kernel_ssize = MMU_SEGSIZE_256M;
int mmu_highuser_ssize = MMU_SEGSIZE_256M;
u16 mmu_slb_size = 64;
#ifdef CONFIG_HUGETLB_PAGE
int mmu_huge_psize = MMU_PAGE_16M;
unsigned int HPAGE_SHIFT;
......
@@ -256,6 +256,7 @@ void slb_initialize(void)
        static int slb_encoding_inited;
        extern unsigned int *slb_miss_kernel_load_linear;
        extern unsigned int *slb_miss_kernel_load_io;
        extern unsigned int *slb_compare_rr_to_size;

        /* Prepare our SLB miss handler based on our page size */
        linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
@@ -269,6 +270,8 @@ void slb_initialize(void)
                                   SLB_VSID_KERNEL | linear_llp);
                patch_slb_encoding(slb_miss_kernel_load_io,
                                   SLB_VSID_KERNEL | io_llp);
                patch_slb_encoding(slb_compare_rr_to_size,
                                   mmu_slb_size);

                DBG("SLB: linear LLP = %04x\n", linear_llp);
                DBG("SLB: io LLP = %04x\n", io_llp);
......
@@ -227,8 +227,9 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
7:      ld      r10,PACASTABRR(r13)
        addi    r10,r10,1
        /* use a cpu feature mask if we ever change our slb size */
        cmpldi  r10,SLB_NUM_ENTRIES
        /* This gets soft patched on boot. */
_GLOBAL(slb_compare_rr_to_size)
        cmpldi  r10,0
        blt+    4f
        li      r10,SLB_NUM_BOLTED
......
@@ -36,6 +36,7 @@
#include <asm/mpic.h>
#include <asm/smp.h>
#include <asm/time.h>
#include <asm/mmu.h>
#include <pcmcia/ss.h>
#include <pcmcia/cistpl.h>
@@ -302,7 +303,7 @@ static int pas_machine_check_handler(struct pt_regs *regs)
                int i;

                printk(KERN_ERR "slb contents:\n");
                for (i = 0; i < SLB_NUM_ENTRIES; i++) {
                for (i = 0; i < mmu_slb_size; i++) {
                        asm volatile("slbmfee %0,%1" : "=r" (e) : "r" (i));
                        asm volatile("slbmfev %0,%1" : "=r" (v) : "r" (i));
                        printk(KERN_ERR "%02d %016lx %016lx\n", i, e, v);
......
@@ -2543,7 +2543,7 @@ static void dump_slb(void)
        printf("SLB contents of cpu %x\n", smp_processor_id());

        for (i = 0; i < SLB_NUM_ENTRIES; i++) {
        for (i = 0; i < mmu_slb_size; i++) {
                asm volatile("slbmfee %0,%1" : "=r" (tmp) : "r" (i));
                printf("%02d %016lx ", i, tmp);
......
@@ -180,6 +180,7 @@ extern int mmu_vmalloc_psize;
extern int mmu_io_psize;
extern int mmu_kernel_ssize;
extern int mmu_highuser_ssize;
extern u16 mmu_slb_size;

/*
 * If the processor supports 64k normal pages but not 64k cache
......
@@ -691,12 +691,6 @@
#define PV_BE           0x0070
#define PV_PA6T         0x0090

/*
 * Number of entries in the SLB. If this ever changes we should handle
 * it with a use a cpu feature fixup.
 */
#define SLB_NUM_ENTRIES 64

/* Macros for setting and retrieving special purpose registers */
#ifndef __ASSEMBLY__
#define mfmsr()         ({unsigned long rval; \
......