Commit fe036a06 authored by Benjamin Herrenschmidt, committed by Michael Ellerman

powerpc/64/kexec: Fix MMU cleanup on radix

Just using the hash ops won't work anymore since radix will have
NULL in there. Instead create an mmu_cleanup_all() function which
will do the right thing based on the MMU mode.

For Radix, for now I clear UPRT and the PTCR, effectively switching
back to Radix with no partition table setup.

Currently I set it to NULL on BookE, though it might be a good idea
to wipe the TLB there (Scott?)
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Acked-by: Balbir Singh <bsingharora@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent fc48bad5
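In short, the patch routes the kexec MMU teardown through a single mmu_cleanup_all() hook that dispatches on the active MMU mode, instead of passing mmu_hash_ops.hpte_clear_all directly to kexec_sequence(). The sketch below is condensed from the hunks that follow; the code itself comes straight from the patch, while the comments are added context and not part of the commit. It is not a standalone build unit: it relies on kernel-internal helpers (radix_enabled(), mmu_hash_ops, firmware_has_feature() and the SPR accessors).

/*
 * New single entry point used by kexec: pick the right cleanup for the
 * MMU mode instead of assuming hash. On BookE this symbol is simply
 * defined to NULL for now.
 */
void mmu_cleanup_all(void)
{
	if (radix_enabled())
		radix__mmu_cleanup_all();
	else if (mmu_hash_ops.hpte_clear_all)
		mmu_hash_ops.hpte_clear_all();
}

void radix__mmu_cleanup_all(void)
{
	unsigned long lpcr;

	/*
	 * LPCR and PTCR are hypervisor-privileged, so only touch them
	 * when running bare metal (not as an LPAR guest).
	 */
	if (!firmware_has_feature(FW_FEATURE_LPAR)) {
		lpcr = mfspr(SPRN_LPCR);
		/*
		 * Clear LPCR[UPRT] (Use Process Table) and zero the
		 * partition table base, leaving radix with no partition
		 * table set up, then flush the TLB so no stale
		 * translations survive into the next kernel.
		 */
		mtspr(SPRN_LPCR, lpcr & ~LPCR_UPRT);
		mtspr(SPRN_PTCR, 0);
		radix__flush_tlb_all();
	}
}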
@@ -313,6 +313,9 @@ extern int book3e_htw_mode;
  * return 1, indicating that the tlb requires preloading.
  */
 #define HUGETLB_NEED_PRELOAD
+
+#define mmu_cleanup_all NULL
+
 #endif
 
 #endif /* !__ASSEMBLY__ */
@@ -204,6 +204,10 @@ extern unsigned int __start___mmu_ftr_fixup, __stop___mmu_ftr_fixup;
  * make it match the size our of bolted TLB area
  */
 extern u64 ppc64_rma_size;
+
+/* Cleanup function used by kexec */
+extern void mmu_cleanup_all(void);
+extern void radix__mmu_cleanup_all(void);
 #endif /* CONFIG_PPC64 */
 
 struct mm_struct;
@@ -55,9 +55,6 @@ int default_machine_kexec_prepare(struct kimage *image)
 	const unsigned long *basep;
 	const unsigned int *sizep;
 
-	if (!mmu_hash_ops.hpte_clear_all)
-		return -ENOENT;
-
 	/*
 	 * Since we use the kernel fault handlers and paging code to
 	 * handle the virtual mode, we must make sure no destination
@@ -379,13 +376,8 @@ void default_machine_kexec(struct kimage *image)
 	 * a toc is easier in C, so pass in what we can.
 	 */
 	kexec_sequence(&kexec_stack, image->start, image,
-			page_address(image->control_code_page),
-#ifdef CONFIG_PPC_STD_MMU
-			mmu_hash_ops.hpte_clear_all
-#else
-			NULL
-#endif
-	);
+		       page_address(image->control_code_page),
+		       mmu_cleanup_all);
 
 	/* NOTREACHED */
 }
@@ -116,3 +116,12 @@ void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
 	return;
 }
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
+/* For use by kexec */
+void mmu_cleanup_all(void)
+{
+	if (radix_enabled())
+		radix__mmu_cleanup_all();
+	else if (mmu_hash_ops.hpte_clear_all)
+		mmu_hash_ops.hpte_clear_all();
+}
@@ -396,6 +396,18 @@ void radix__early_init_mmu_secondary(void)
 	}
 }
 
+void radix__mmu_cleanup_all(void)
+{
+	unsigned long lpcr;
+
+	if (!firmware_has_feature(FW_FEATURE_LPAR)) {
+		lpcr = mfspr(SPRN_LPCR);
+		mtspr(SPRN_LPCR, lpcr & ~LPCR_UPRT);
+		mtspr(SPRN_PTCR, 0);
+		radix__flush_tlb_all();
+	}
+}
+
 void radix__setup_initial_memory_limit(phys_addr_t first_memblock_base,
 				       phys_addr_t first_memblock_size)
 {