Commit eba5de8d authored by Scott Wood

powerpc/fsl-booke-64: Don't limit ppc64_rma_size to one TLB entry

This is required for kdump to work when loaded at an address that
does not fall within the first TLB entry -- which can easily happen
because while the lower limit is enforced via reserved memory, which
doesn't affect how much is mapped, the upper limit is enforced via a
different mechanism that does.  Thus, more TLB entries are needed than
would normally be used, as the total memory to be mapped might not be a
power of two.
Signed-off-by: Scott Wood <scottwood@freescale.com>
parent d9e1831a
...@@ -169,7 +169,8 @@ unsigned long calc_cam_sz(unsigned long ram, unsigned long virt, ...@@ -169,7 +169,8 @@ unsigned long calc_cam_sz(unsigned long ram, unsigned long virt,
} }
static unsigned long map_mem_in_cams_addr(phys_addr_t phys, unsigned long virt, static unsigned long map_mem_in_cams_addr(phys_addr_t phys, unsigned long virt,
unsigned long ram, int max_cam_idx) unsigned long ram, int max_cam_idx,
bool dryrun)
{ {
int i; int i;
unsigned long amount_mapped = 0; unsigned long amount_mapped = 0;
...@@ -179,7 +180,9 @@ static unsigned long map_mem_in_cams_addr(phys_addr_t phys, unsigned long virt, ...@@ -179,7 +180,9 @@ static unsigned long map_mem_in_cams_addr(phys_addr_t phys, unsigned long virt,
unsigned long cam_sz; unsigned long cam_sz;
cam_sz = calc_cam_sz(ram, virt, phys); cam_sz = calc_cam_sz(ram, virt, phys);
settlbcam(i, virt, phys, cam_sz, pgprot_val(PAGE_KERNEL_X), 0); if (!dryrun)
settlbcam(i, virt, phys, cam_sz,
pgprot_val(PAGE_KERNEL_X), 0);
ram -= cam_sz; ram -= cam_sz;
amount_mapped += cam_sz; amount_mapped += cam_sz;
...@@ -187,6 +190,9 @@ static unsigned long map_mem_in_cams_addr(phys_addr_t phys, unsigned long virt, ...@@ -187,6 +190,9 @@ static unsigned long map_mem_in_cams_addr(phys_addr_t phys, unsigned long virt,
phys += cam_sz; phys += cam_sz;
} }
if (dryrun)
return amount_mapped;
loadcam_multi(0, i, max_cam_idx); loadcam_multi(0, i, max_cam_idx);
tlbcam_index = i; tlbcam_index = i;
...@@ -199,12 +205,12 @@ static unsigned long map_mem_in_cams_addr(phys_addr_t phys, unsigned long virt, ...@@ -199,12 +205,12 @@ static unsigned long map_mem_in_cams_addr(phys_addr_t phys, unsigned long virt,
return amount_mapped; return amount_mapped;
} }
unsigned long map_mem_in_cams(unsigned long ram, int max_cam_idx) unsigned long map_mem_in_cams(unsigned long ram, int max_cam_idx, bool dryrun)
{ {
unsigned long virt = PAGE_OFFSET; unsigned long virt = PAGE_OFFSET;
phys_addr_t phys = memstart_addr; phys_addr_t phys = memstart_addr;
return map_mem_in_cams_addr(phys, virt, ram, max_cam_idx); return map_mem_in_cams_addr(phys, virt, ram, max_cam_idx, dryrun);
} }
#ifdef CONFIG_PPC32 #ifdef CONFIG_PPC32
...@@ -235,7 +241,7 @@ void __init adjust_total_lowmem(void) ...@@ -235,7 +241,7 @@ void __init adjust_total_lowmem(void)
ram = min((phys_addr_t)__max_low_memory, (phys_addr_t)total_lowmem); ram = min((phys_addr_t)__max_low_memory, (phys_addr_t)total_lowmem);
i = switch_to_as1(); i = switch_to_as1();
__max_low_memory = map_mem_in_cams(ram, CONFIG_LOWMEM_CAM_NUM); __max_low_memory = map_mem_in_cams(ram, CONFIG_LOWMEM_CAM_NUM, false);
restore_to_as0(i, 0, 0, 1); restore_to_as0(i, 0, 0, 1);
pr_info("Memory CAM mapping: "); pr_info("Memory CAM mapping: ");
...@@ -303,10 +309,12 @@ notrace void __init relocate_init(u64 dt_ptr, phys_addr_t start) ...@@ -303,10 +309,12 @@ notrace void __init relocate_init(u64 dt_ptr, phys_addr_t start)
n = switch_to_as1(); n = switch_to_as1();
/* map a 64M area for the second relocation */ /* map a 64M area for the second relocation */
if (memstart_addr > start) if (memstart_addr > start)
map_mem_in_cams(0x4000000, CONFIG_LOWMEM_CAM_NUM); map_mem_in_cams(0x4000000, CONFIG_LOWMEM_CAM_NUM,
false);
else else
map_mem_in_cams_addr(start, PAGE_OFFSET + offset, map_mem_in_cams_addr(start, PAGE_OFFSET + offset,
0x4000000, CONFIG_LOWMEM_CAM_NUM); 0x4000000, CONFIG_LOWMEM_CAM_NUM,
false);
restore_to_as0(n, offset, __va(dt_ptr), 1); restore_to_as0(n, offset, __va(dt_ptr), 1);
/* We should never reach here */ /* We should never reach here */
panic("Relocation error"); panic("Relocation error");
......
...@@ -141,7 +141,8 @@ extern void MMU_init_hw(void); ...@@ -141,7 +141,8 @@ extern void MMU_init_hw(void);
extern unsigned long mmu_mapin_ram(unsigned long top); extern unsigned long mmu_mapin_ram(unsigned long top);
#elif defined(CONFIG_PPC_FSL_BOOK3E) #elif defined(CONFIG_PPC_FSL_BOOK3E)
extern unsigned long map_mem_in_cams(unsigned long ram, int max_cam_idx); extern unsigned long map_mem_in_cams(unsigned long ram, int max_cam_idx,
bool dryrun);
extern unsigned long calc_cam_sz(unsigned long ram, unsigned long virt, extern unsigned long calc_cam_sz(unsigned long ram, unsigned long virt,
phys_addr_t phys); phys_addr_t phys);
#ifdef CONFIG_PPC32 #ifdef CONFIG_PPC32
......
...@@ -648,7 +648,7 @@ static void early_init_this_mmu(void) ...@@ -648,7 +648,7 @@ static void early_init_this_mmu(void)
if (map) if (map)
linear_map_top = map_mem_in_cams(linear_map_top, linear_map_top = map_mem_in_cams(linear_map_top,
num_cams); num_cams, false);
} }
#endif #endif
...@@ -746,10 +746,14 @@ void setup_initial_memory_limit(phys_addr_t first_memblock_base, ...@@ -746,10 +746,14 @@ void setup_initial_memory_limit(phys_addr_t first_memblock_base,
* entries are supported though that may eventually * entries are supported though that may eventually
* change. * change.
* *
* on FSL Embedded 64-bit, we adjust the RMA size to match the * on FSL Embedded 64-bit, usually all RAM is bolted, but with
* first bolted TLB entry size. We still limit max to 1G even if * unusual memory sizes it's possible for some RAM to not be mapped
* the TLB could cover more. This is due to what the early init * (such RAM is not used at all by Linux, since we don't support
* code is setup to do. * highmem on 64-bit). We limit ppc64_rma_size to what would be
* mappable if this memblock is the only one. Additional memblocks
* can only increase, not decrease, the amount that ends up getting
* mapped. We still limit max to 1G even if we'll eventually map
* more. This is due to what the early init code is set up to do.
* *
* We crop it to the size of the first MEMBLOCK to * We crop it to the size of the first MEMBLOCK to
* avoid going over total available memory just in case... * avoid going over total available memory just in case...
...@@ -757,8 +761,14 @@ void setup_initial_memory_limit(phys_addr_t first_memblock_base, ...@@ -757,8 +761,14 @@ void setup_initial_memory_limit(phys_addr_t first_memblock_base,
#ifdef CONFIG_PPC_FSL_BOOK3E #ifdef CONFIG_PPC_FSL_BOOK3E
if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) { if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
unsigned long linear_sz; unsigned long linear_sz;
linear_sz = calc_cam_sz(first_memblock_size, PAGE_OFFSET, unsigned int num_cams;
first_memblock_base);
/* use a quarter of the TLBCAM for bolted linear map */
num_cams = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) / 4;
linear_sz = map_mem_in_cams(first_memblock_size, num_cams,
true);
ppc64_rma_size = min_t(u64, linear_sz, 0x40000000); ppc64_rma_size = min_t(u64, linear_sz, 0x40000000);
} else } else
#endif #endif
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment