Commit c7ed509b authored by Alexander Gordeev, committed by Heiko Carstens

s390/nmi: disable interrupts on extended save area update

Updating of the pointer to machine check extended save area
on the IPL CPU needs the lowcore protection to be disabled.
Disable interrupts while the protection is off to avoid
unnoticed writes to the lowcore.
Suggested-by: Heiko Carstens <hca@linux.ibm.com>
Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
parent cff2d3ab
...@@ -98,9 +98,9 @@ struct mcesa { ...@@ -98,9 +98,9 @@ struct mcesa {
struct pt_regs; struct pt_regs;
void nmi_alloc_boot_cpu(struct lowcore *lc); void nmi_alloc_mcesa_early(u64 *mcesad);
int nmi_alloc_per_cpu(struct lowcore *lc); int nmi_alloc_mcesa(u64 *mcesad);
void nmi_free_per_cpu(struct lowcore *lc); void nmi_free_mcesa(u64 *mcesad);
void s390_handle_mcck(void); void s390_handle_mcck(void);
void __s390_handle_mcck(void); void __s390_handle_mcck(void);
......
...@@ -58,27 +58,27 @@ static inline unsigned long nmi_get_mcesa_size(void) ...@@ -58,27 +58,27 @@ static inline unsigned long nmi_get_mcesa_size(void)
/* /*
* The initial machine check extended save area for the boot CPU. * The initial machine check extended save area for the boot CPU.
* It will be replaced by nmi_init() with an allocated structure. * It will be replaced on the boot CPU reinit with an allocated
* The structure is required for machine check happening early in * structure. The structure is required for machine check happening
* the boot process. * early in the boot process.
*/ */
static struct mcesa boot_mcesa __initdata __aligned(MCESA_MAX_SIZE); static struct mcesa boot_mcesa __initdata __aligned(MCESA_MAX_SIZE);
void __init nmi_alloc_boot_cpu(struct lowcore *lc) void __init nmi_alloc_mcesa_early(u64 *mcesad)
{ {
if (!nmi_needs_mcesa()) if (!nmi_needs_mcesa())
return; return;
lc->mcesad = __pa(&boot_mcesa); *mcesad = __pa(&boot_mcesa);
if (MACHINE_HAS_GS) if (MACHINE_HAS_GS)
lc->mcesad |= ilog2(MCESA_MAX_SIZE); *mcesad |= ilog2(MCESA_MAX_SIZE);
} }
static int __init nmi_init(void) static void __init nmi_alloc_cache(void)
{ {
unsigned long origin, cr0, size; unsigned long size;
if (!nmi_needs_mcesa()) if (!nmi_needs_mcesa())
return 0; return;
size = nmi_get_mcesa_size(); size = nmi_get_mcesa_size();
if (size > MCESA_MIN_SIZE) if (size > MCESA_MIN_SIZE)
mcesa_origin_lc = ilog2(size); mcesa_origin_lc = ilog2(size);
...@@ -86,40 +86,31 @@ static int __init nmi_init(void) ...@@ -86,40 +86,31 @@ static int __init nmi_init(void)
mcesa_cache = kmem_cache_create("nmi_save_areas", size, size, 0, NULL); mcesa_cache = kmem_cache_create("nmi_save_areas", size, size, 0, NULL);
if (!mcesa_cache) if (!mcesa_cache)
panic("Couldn't create nmi save area cache"); panic("Couldn't create nmi save area cache");
origin = (unsigned long) kmem_cache_alloc(mcesa_cache, GFP_KERNEL);
if (!origin)
panic("Couldn't allocate nmi save area");
/* The pointer is stored with mcesa_bits ORed in */
kmemleak_not_leak((void *) origin);
__ctl_store(cr0, 0, 0);
__ctl_clear_bit(0, 28); /* disable lowcore protection */
/* Replace boot_mcesa on the boot CPU */
S390_lowcore.mcesad = __pa(origin) | mcesa_origin_lc;
__ctl_load(cr0, 0, 0);
return 0;
} }
early_initcall(nmi_init);
int nmi_alloc_per_cpu(struct lowcore *lc) int __ref nmi_alloc_mcesa(u64 *mcesad)
{ {
unsigned long origin; unsigned long origin;
*mcesad = 0;
if (!nmi_needs_mcesa()) if (!nmi_needs_mcesa())
return 0; return 0;
if (!mcesa_cache)
nmi_alloc_cache();
origin = (unsigned long) kmem_cache_alloc(mcesa_cache, GFP_KERNEL); origin = (unsigned long) kmem_cache_alloc(mcesa_cache, GFP_KERNEL);
if (!origin) if (!origin)
return -ENOMEM; return -ENOMEM;
/* The pointer is stored with mcesa_bits ORed in */ /* The pointer is stored with mcesa_bits ORed in */
kmemleak_not_leak((void *) origin); kmemleak_not_leak((void *) origin);
lc->mcesad = __pa(origin) | mcesa_origin_lc; *mcesad = __pa(origin) | mcesa_origin_lc;
return 0; return 0;
} }
void nmi_free_per_cpu(struct lowcore *lc) void nmi_free_mcesa(u64 *mcesad)
{ {
if (!nmi_needs_mcesa()) if (!nmi_needs_mcesa())
return; return;
kmem_cache_free(mcesa_cache, __va(lc->mcesad & MCESA_ORIGIN_MASK)); kmem_cache_free(mcesa_cache, __va(*mcesad & MCESA_ORIGIN_MASK));
} }
static notrace void s390_handle_damage(void) static notrace void s390_handle_damage(void)
......
...@@ -445,7 +445,7 @@ static void __init setup_lowcore_dat_off(void) ...@@ -445,7 +445,7 @@ static void __init setup_lowcore_dat_off(void)
lc->lpp = LPP_MAGIC; lc->lpp = LPP_MAGIC;
lc->machine_flags = S390_lowcore.machine_flags; lc->machine_flags = S390_lowcore.machine_flags;
lc->preempt_count = S390_lowcore.preempt_count; lc->preempt_count = S390_lowcore.preempt_count;
nmi_alloc_boot_cpu(lc); nmi_alloc_mcesa_early(&lc->mcesad);
lc->sys_enter_timer = S390_lowcore.sys_enter_timer; lc->sys_enter_timer = S390_lowcore.sys_enter_timer;
lc->exit_timer = S390_lowcore.exit_timer; lc->exit_timer = S390_lowcore.exit_timer;
lc->user_timer = S390_lowcore.user_timer; lc->user_timer = S390_lowcore.user_timer;
......
...@@ -212,7 +212,7 @@ static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu) ...@@ -212,7 +212,7 @@ static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
lc->return_lpswe = gen_lpswe(__LC_RETURN_PSW); lc->return_lpswe = gen_lpswe(__LC_RETURN_PSW);
lc->return_mcck_lpswe = gen_lpswe(__LC_RETURN_MCCK_PSW); lc->return_mcck_lpswe = gen_lpswe(__LC_RETURN_MCCK_PSW);
lc->preempt_count = PREEMPT_DISABLED; lc->preempt_count = PREEMPT_DISABLED;
if (nmi_alloc_per_cpu(lc)) if (nmi_alloc_mcesa(&lc->mcesad))
goto out; goto out;
lowcore_ptr[cpu] = lc; lowcore_ptr[cpu] = lc;
pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, (u32)(unsigned long) lc); pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, (u32)(unsigned long) lc);
...@@ -239,7 +239,7 @@ static void pcpu_free_lowcore(struct pcpu *pcpu) ...@@ -239,7 +239,7 @@ static void pcpu_free_lowcore(struct pcpu *pcpu)
mcck_stack = lc->mcck_stack - STACK_INIT_OFFSET; mcck_stack = lc->mcck_stack - STACK_INIT_OFFSET;
pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, 0); pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, 0);
lowcore_ptr[cpu] = NULL; lowcore_ptr[cpu] = NULL;
nmi_free_per_cpu(lc); nmi_free_mcesa(&lc->mcesad);
stack_free(async_stack); stack_free(async_stack);
stack_free(mcck_stack); stack_free(mcck_stack);
free_pages(nodat_stack, THREAD_SIZE_ORDER); free_pages(nodat_stack, THREAD_SIZE_ORDER);
...@@ -1271,14 +1271,15 @@ static int __init smp_reinit_ipl_cpu(void) ...@@ -1271,14 +1271,15 @@ static int __init smp_reinit_ipl_cpu(void)
{ {
unsigned long async_stack, nodat_stack, mcck_stack; unsigned long async_stack, nodat_stack, mcck_stack;
struct lowcore *lc, *lc_ipl; struct lowcore *lc, *lc_ipl;
unsigned long flags; unsigned long flags, cr0;
u64 mcesad;
lc_ipl = lowcore_ptr[0]; lc_ipl = lowcore_ptr[0];
lc = (struct lowcore *) __get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER); lc = (struct lowcore *) __get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
nodat_stack = __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER); nodat_stack = __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
async_stack = stack_alloc(); async_stack = stack_alloc();
mcck_stack = stack_alloc(); mcck_stack = stack_alloc();
if (!lc || !nodat_stack || !async_stack || !mcck_stack) if (!lc || !nodat_stack || !async_stack || !mcck_stack || nmi_alloc_mcesa(&mcesad))
panic("Couldn't allocate memory"); panic("Couldn't allocate memory");
local_irq_save(flags); local_irq_save(flags);
...@@ -1287,6 +1288,10 @@ static int __init smp_reinit_ipl_cpu(void) ...@@ -1287,6 +1288,10 @@ static int __init smp_reinit_ipl_cpu(void)
S390_lowcore.nodat_stack = nodat_stack + STACK_INIT_OFFSET; S390_lowcore.nodat_stack = nodat_stack + STACK_INIT_OFFSET;
S390_lowcore.async_stack = async_stack + STACK_INIT_OFFSET; S390_lowcore.async_stack = async_stack + STACK_INIT_OFFSET;
S390_lowcore.mcck_stack = mcck_stack + STACK_INIT_OFFSET; S390_lowcore.mcck_stack = mcck_stack + STACK_INIT_OFFSET;
__ctl_store(cr0, 0, 0);
__ctl_clear_bit(0, 28); /* disable lowcore protection */
S390_lowcore.mcesad = mcesad;
__ctl_load(cr0, 0, 0);
lowcore_ptr[0] = lc; lowcore_ptr[0] = lc;
local_mcck_enable(); local_mcck_enable();
local_irq_restore(flags); local_irq_restore(flags);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment