Commit 1af19331 authored by Nicholas Piggin, committed by Michael Ellerman

powerpc/64s: Relax PACA address limitations

Book3S PACA memory allocation is restricted by the RMA limit and also
must not take SLB faults when accessed in virtual mode. Currently a
fixed 256MB limit is used for this, which is imprecise and sub-optimal.

Update the PACA allocation limits to use ppc64_rma_size for the RMA
limit, and share the safe_stack_limit() that is currently used for stack
allocations that must not take virtual mode faults.

The safe_stack_limit() name is changed to ppc64_bolted_size() to match
ppc64_rma_size and some comments are updated. We also need to use
early_mmu_has_feature() because we are now calling this function prior
to the jump label patching that enables mmu_has_feature().
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
[mpe: Change mmu_has_feature() to early_mmu_has_feature()]
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent c610d65c
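
In effect, the Book3S PACA allocation limit becomes min(ppc64_bolted_size(), ppc64_rma_size). Below is a condensed sketch of the resulting logic, mirroring the paca.c hunk in the diff that follows; paca_alloc_limit() is a hypothetical name used here only for illustration, the patch itself computes this inline in allocate_pacas():

    static u64 paca_alloc_limit(void)   /* hypothetical helper, for illustration only */
    {
            /* PACAs are accessed in real mode, so they must sit within the RMA */
            u64 limit = ppc64_rma_size;

    #ifdef CONFIG_PPC_BOOK3S_64
            /* they must also not take SLB faults in virtual mode, so stay within the bolted region */
            limit = min(ppc64_bolted_size(), ppc64_rma_size);
    #endif
            return limit;
    }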
arch/powerpc/kernel/paca.c
@@ -18,6 +18,8 @@
 #include <asm/pgtable.h>
 #include <asm/kexec.h>
 
+#include "setup.h"
+
 #ifdef CONFIG_PPC_BOOK3S
 
 /*
@@ -208,15 +210,14 @@ void __init allocate_pacas(void)
         u64 limit;
         int cpu;
 
+        limit = ppc64_rma_size;
+
 #ifdef CONFIG_PPC_BOOK3S_64
         /*
-         * We can't take SLB misses on the paca, and we want to access them
-         * in real mode, so allocate them within the RMA and also within
-         * the first segment.
+         * We access pacas in real mode, and cannot take SLB faults
+         * on them when in virtual mode, so allocate them accordingly.
          */
-        limit = min(0x10000000ULL, limit);
-#else
-        limit = ppc64_rma_size;
+        limit = min(ppc64_bolted_size(), ppc64_rma_size);
 #endif
 
         paca_size = PAGE_ALIGN(sizeof(struct paca_struct) * nr_cpu_ids);
arch/powerpc/kernel/setup.h
@@ -51,6 +51,10 @@ void record_spr_defaults(void);
 static inline void record_spr_defaults(void) { };
 #endif
 
+#ifdef CONFIG_PPC64
+u64 ppc64_bolted_size(void);
+#endif
+
 /*
  * Having this in kvm_ppc.h makes include dependencies too
  * tricky to solve for setup-common.c so have it here.
arch/powerpc/kernel/setup_64.c
@@ -565,25 +565,31 @@ void __init initialize_cache_info(void)
         DBG(" <- initialize_cache_info()\n");
 }
 
-/* This returns the limit below which memory accesses to the linear
- * mapping are guarnateed not to cause a TLB or SLB miss. This is
- * used to allocate interrupt or emergency stacks for which our
- * exception entry path doesn't deal with being interrupted.
+/*
+ * This returns the limit below which memory accesses to the linear
+ * mapping are guarnateed not to cause an architectural exception (e.g.,
+ * TLB or SLB miss fault).
+ *
+ * This is used to allocate PACAs and various interrupt stacks that
+ * that are accessed early in interrupt handlers that must not cause
+ * re-entrant interrupts.
  */
-static __init u64 safe_stack_limit(void)
+__init u64 ppc64_bolted_size(void)
 {
 #ifdef CONFIG_PPC_BOOK3E
         /* Freescale BookE bolts the entire linear mapping */
-        if (mmu_has_feature(MMU_FTR_TYPE_FSL_E))
+        /* XXX: BookE ppc64_rma_limit setup seems to disagree? */
+        if (early_mmu_has_feature(MMU_FTR_TYPE_FSL_E))
                 return linear_map_top;
         /* Other BookE, we assume the first GB is bolted */
         return 1ul << 30;
 #else
+        /* BookS radix, does not take faults on linear mapping */
         if (early_radix_enabled())
                 return ULONG_MAX;
 
-        /* BookS, the first segment is bolted */
-        if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
+        /* BookS hash, the first segment is bolted */
+        if (early_mmu_has_feature(MMU_FTR_1T_SEGMENT))
                 return 1UL << SID_SHIFT_1T;
         return 1UL << SID_SHIFT;
 #endif
@@ -591,7 +597,7 @@ static __init u64 safe_stack_limit(void)
 
 void __init irqstack_early_init(void)
 {
-        u64 limit = safe_stack_limit();
+        u64 limit = ppc64_bolted_size();
         unsigned int i;
 
         /*
@@ -676,7 +682,7 @@ void __init emergency_stack_init(void)
          * initialized in kernel/irq.c. These are initialized here in order
          * to have emergency stacks available as early as possible.
          */
-        limit = min(safe_stack_limit(), ppc64_rma_size);
+        limit = min(ppc64_bolted_size(), ppc64_rma_size);
 
         for_each_possible_cpu(i) {
                 struct thread_info *ti;