Commit e7e81847 authored by Nicholas Piggin, committed by Michael Ellerman

powerpc/64s: move machine check SLB flushing to mm/slb.c

The machine check code that flushes and restores bolted segments in
real mode belongs in mm/slb.c. This will also be used by pseries
machine check and idle code in future changes.
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent ae24ce5e
...@@ -497,6 +497,9 @@ extern void hpte_init_native(void); ...@@ -497,6 +497,9 @@ extern void hpte_init_native(void);
extern void slb_initialize(void); extern void slb_initialize(void);
extern void slb_flush_and_rebolt(void); extern void slb_flush_and_rebolt(void);
void slb_flush_all_realmode(void);
void __slb_restore_bolted_realmode(void);
void slb_restore_bolted_realmode(void);
extern void slb_vmalloc_update(void); extern void slb_vmalloc_update(void);
extern void slb_set_size(u16 size); extern void slb_set_size(u16 size);
......
...@@ -62,11 +62,8 @@ static unsigned long addr_to_pfn(struct pt_regs *regs, unsigned long addr) ...@@ -62,11 +62,8 @@ static unsigned long addr_to_pfn(struct pt_regs *regs, unsigned long addr)
#ifdef CONFIG_PPC_BOOK3S_64 #ifdef CONFIG_PPC_BOOK3S_64
static void flush_and_reload_slb(void) static void flush_and_reload_slb(void)
{ {
struct slb_shadow *slb;
unsigned long i, n;
/* Invalidate all SLBs */ /* Invalidate all SLBs */
asm volatile("slbmte %0,%0; slbia" : : "r" (0)); slb_flush_all_realmode();
#ifdef CONFIG_KVM_BOOK3S_HANDLER #ifdef CONFIG_KVM_BOOK3S_HANDLER
/* /*
...@@ -76,22 +73,17 @@ static void flush_and_reload_slb(void) ...@@ -76,22 +73,17 @@ static void flush_and_reload_slb(void)
if (get_paca()->kvm_hstate.in_guest) if (get_paca()->kvm_hstate.in_guest)
return; return;
#endif #endif
if (early_radix_enabled())
/* For host kernel, reload the SLBs from shadow SLB buffer. */
slb = get_slb_shadow();
if (!slb)
return; return;
n = min_t(u32, be32_to_cpu(slb->persistent), SLB_MIN_SIZE); /*
* This probably shouldn't happen, but it may be possible it's
/* Load up the SLB entries from shadow SLB */ * called in early boot before SLB shadows are allocated.
for (i = 0; i < n; i++) { */
unsigned long rb = be64_to_cpu(slb->save_area[i].esid); if (!get_slb_shadow())
unsigned long rs = be64_to_cpu(slb->save_area[i].vsid); return;
rb = (rb & ~0xFFFul) | i; slb_restore_bolted_realmode();
asm volatile("slbmte %0,%1" : : "r" (rs), "r" (rb));
}
} }
#endif #endif
......
...@@ -90,6 +90,45 @@ static inline void create_shadowed_slbe(unsigned long ea, int ssize, ...@@ -90,6 +90,45 @@ static inline void create_shadowed_slbe(unsigned long ea, int ssize,
: "memory" ); : "memory" );
} }
/*
* Insert bolted entries into SLB (which may not be empty, so don't clear
* slb_cache_ptr).
*/
void __slb_restore_bolted_realmode(void)
{
struct slb_shadow *p = get_slb_shadow();
enum slb_index index;
/* No isync needed because realmode. */
for (index = 0; index < SLB_NUM_BOLTED; index++) {
asm volatile("slbmte %0,%1" :
: "r" (be64_to_cpu(p->save_area[index].vsid)),
"r" (be64_to_cpu(p->save_area[index].esid)));
}
}
/*
 * Load the bolted entries into an empty SLB.
 *
 * This is not the same as a rebolt: the bolted segment values are not
 * recomputed or changed, merely reloaded from the shadow area.
 */
void slb_restore_bolted_realmode(void)
{
	__slb_restore_bolted_realmode();

	/* The SLB is assumed empty (see above), so clear the entry cache. */
	get_paca()->slb_cache_ptr = 0;
}
/*
 * Invalidate every SLB entry, including entry 0, so this may only be
 * called in real mode (there is no translation to fault on afterwards).
 */
void slb_flush_all_realmode(void)
{
	/* slbmte of zero clears entry 0; slbia invalidates the rest. */
	asm volatile("slbmte %0,%0; slbia" : : "r" (0));
}
static void __slb_flush_and_rebolt(void) static void __slb_flush_and_rebolt(void)
{ {
/* If you change this make sure you change SLB_NUM_BOLTED /* If you change this make sure you change SLB_NUM_BOLTED
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment