Commit fc8a898c authored by Michael Ellerman

Merge branch 'fixes' into next

Merge our fixes branch to bring in some changes that conflict with
upcoming next content.
parents 544f823e 2ea31e2e
@@ -163,7 +163,6 @@ config PPC
 	select ARCH_WANT_IRQS_OFF_ACTIVATE_MM
 	select ARCH_WANT_LD_ORPHAN_WARN
 	select ARCH_WANTS_MODULES_DATA_IN_VMALLOC	if PPC_BOOK3S_32 || PPC_8xx
-	select ARCH_WANTS_NO_INSTR
 	select ARCH_WEAK_RELEASE_ACQUIRE
 	select BINFMT_ELF
 	select BUILDTIME_TABLE_SORT
...
@@ -210,6 +210,10 @@ ld_version()
 	gsub(".*version ", "");
 	gsub("-.*", "");
 	split($1,a, ".");
+	if( length(a[3]) == "8" )
+		# a[3] is probably a date of format yyyymmdd used for release snapshots. We
+		# can assume it to be zero as it does not signify a new version as such.
+		a[3] = 0;
 	print a[1]*100000000 + a[2]*1000000 + a[3]*10000;
 	exit
 }'
...
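The awk above encodes "major.minor.patch" as a single comparable integer, zeroing a patch field that is really a yyyymmdd snapshot stamp. A minimal C model of the same encoding (ld_version_code() is a hypothetical name, not part of the kernel):

static unsigned long long ld_version_code(int major, int minor, int patch)
{
	if (patch >= 10000000)	/* 8 digits: a yyyymmdd snapshot date, not a patch level */
		patch = 0;
	return major * 100000000ULL + minor * 1000000ULL + patch * 10000ULL;
}

For example, "2.39" encodes as 239000000, while the snapshot "2.38.20220708" would otherwise encode as 238000000 + 20220708*10000 = 202445080000 and compare newer than every real release; with the date zeroed it compares as plain 2.38.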
@@ -97,6 +97,8 @@ static inline void tlb_flush(struct mmu_gather *tlb)
 {
 	if (radix_enabled())
 		radix__tlb_flush(tlb);
+
+	return hash__tlb_flush(tlb);
 }

 #ifdef CONFIG_SMP
...
@@ -175,6 +175,15 @@ static inline notrace unsigned long irq_soft_mask_or_return(unsigned long mask)
 	return flags;
 }

+static inline notrace unsigned long irq_soft_mask_andc_return(unsigned long mask)
+{
+	unsigned long flags = irq_soft_mask_return();
+
+	irq_soft_mask_set(flags & ~mask);
+
+	return flags;
+}
+
 static inline unsigned long arch_local_save_flags(void)
 {
 	return irq_soft_mask_return();
@@ -194,7 +203,7 @@ static inline void arch_local_irq_enable(void)
 static inline unsigned long arch_local_irq_save(void)
 {
-	return irq_soft_mask_set_return(IRQS_DISABLED);
+	return irq_soft_mask_or_return(IRQS_DISABLED);
 }

 static inline bool arch_irqs_disabled_flags(unsigned long flags)
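The switch from irq_soft_mask_set_return() to irq_soft_mask_or_return() matters because the soft mask may already carry IRQS_PMI_DISABLED when flags are saved; overwriting the whole mask with IRQS_DISABLED would drop that bit until the flags are restored. A minimal model of the two helpers (illustrative only; soft_mask stands in for the PACA field, and the bit values match the kernel's):

enum { IRQS_DISABLED = 1, IRQS_PMI_DISABLED = 2 };

static unsigned long soft_mask;

static unsigned long set_return(unsigned long m)
{
	unsigned long old = soft_mask;

	soft_mask = m;		/* overwrites the mask: PMI bit is lost */
	return old;
}

static unsigned long or_return(unsigned long m)
{
	unsigned long old = soft_mask;

	soft_mask = old | m;	/* accumulates: PMI bit is preserved */
	return old;
}

Starting from soft_mask == IRQS_PMI_DISABLED, set_return(IRQS_DISABLED) silently unmasks PMIs inside the supposedly-saved section, while or_return(IRQS_DISABLED) keeps both bits set and restores cleanly.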
@@ -333,10 +342,11 @@ bool power_pmu_wants_prompt_pmi(void);
  * is a different soft-masked interrupt pending that requires hard
  * masking.
  */
-static inline bool should_hard_irq_enable(void)
+static inline bool should_hard_irq_enable(struct pt_regs *regs)
 {
 	if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) {
-		WARN_ON(irq_soft_mask_return() == IRQS_ENABLED);
+		WARN_ON(irq_soft_mask_return() != IRQS_ALL_DISABLED);
+		WARN_ON(!(get_paca()->irq_happened & PACA_IRQ_HARD_DIS));
 		WARN_ON(mfmsr() & MSR_EE);
 	}
@@ -349,8 +359,17 @@ static inline bool should_hard_irq_enable(void)
 	 *
 	 * TODO: Add test for 64e
 	 */
-	if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && !power_pmu_wants_prompt_pmi())
-		return false;
+	if (IS_ENABLED(CONFIG_PPC_BOOK3S_64)) {
+		if (!power_pmu_wants_prompt_pmi())
+			return false;
+		/*
+		 * If PMIs are disabled then IRQs should be disabled as well,
+		 * so we shouldn't see this condition, check for it just in
+		 * case because we are about to enable PMIs.
+		 */
+		if (WARN_ON_ONCE(regs->softe & IRQS_PMI_DISABLED))
+			return false;
+	}

 	if (get_paca()->irq_happened & PACA_IRQ_MUST_HARD_MASK)
 		return false;
@@ -360,18 +379,16 @@ static inline bool should_hard_irq_enable(void)

 /*
  * Do the hard enabling, only call this if should_hard_irq_enable is true.
+ * This allows PMI interrupts to profile irq handlers.
  */
 static inline void do_hard_irq_enable(void)
 {
-	if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) {
-		WARN_ON(irq_soft_mask_return() == IRQS_ENABLED);
-		WARN_ON(get_paca()->irq_happened & PACA_IRQ_MUST_HARD_MASK);
-		WARN_ON(mfmsr() & MSR_EE);
-	}
 	/*
-	 * This allows PMI interrupts (and watchdog soft-NMIs) through.
-	 * There is no other reason to enable this way.
+	 * Asynch interrupts come in with IRQS_ALL_DISABLED,
+	 * PACA_IRQ_HARD_DIS, and MSR[EE]=0.
 	 */
+	if (IS_ENABLED(CONFIG_PPC_BOOK3S_64))
+		irq_soft_mask_andc_return(IRQS_PMI_DISABLED);
 	get_paca()->irq_happened &= ~PACA_IRQ_HARD_DIS;
 	__hard_irq_enable();
 }
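With the soft-mask bit values used here (IRQS_DISABLED is bit 0, IRQS_PMI_DISABLED is bit 1), an asynchronous interrupt enters with the mask at IRQS_ALL_DISABLED = 3; irq_soft_mask_andc_return(IRQS_PMI_DISABLED) computes 3 & ~2 = 1, clearing only the PMI bit, so PMIs can profile the handler while ordinary interrupts stay soft-masked.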
@@ -454,7 +471,7 @@ static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
 	return !(regs->msr & MSR_EE);
 }

-static __always_inline bool should_hard_irq_enable(void)
+static __always_inline bool should_hard_irq_enable(struct pt_regs *regs)
 {
 	return false;
 }
...
@@ -137,7 +137,7 @@ struct imc_pmu {
  * are inited.
  */
 struct imc_pmu_ref {
-	struct mutex lock;
+	spinlock_t lock;
 	unsigned int id;
 	int refc;
 };
...
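The mutex becomes a spinlock because these reference counts are manipulated from PMU callbacks that can run with interrupts disabled, where taking a sleeping lock is invalid. A minimal sketch of the resulting pattern (imc_ref_get() is a hypothetical helper, not the driver's code):

#include <linux/spinlock.h>

static void imc_ref_get(struct imc_pmu_ref *ref)
{
	spin_lock(&ref->lock);		/* non-sleeping: safe with IRQs off */
	ref->refc++;
	spin_unlock(&ref->lock);
}

The lock would be initialised once with spin_lock_init(&ref->lock) when the imc_pmu_ref is set up.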
@@ -27,7 +27,7 @@ DEFINE_INTERRUPT_HANDLER_ASYNC(doorbell_exception)
 	ppc_msgsync();

-	if (should_hard_irq_enable())
+	if (should_hard_irq_enable(regs))
 		do_hard_irq_enable();

 	kvmppc_clear_host_ipi(smp_processor_id());
...
@@ -864,7 +864,7 @@ _GLOBAL(load_up_spe)
  * SPE unavailable trap from kernel - print a message, but let
  * the task use SPE in the kernel until it returns to user mode.
  */
-KernelSPE:
+SYM_FUNC_START_LOCAL(KernelSPE)
 	lwz	r3,_MSR(r1)
 	oris	r3,r3,MSR_SPE@h
 	stw	r3,_MSR(r1)	/* enable use of SPE after return */
@@ -881,6 +881,7 @@ KernelSPE:
 #endif

 	.align	4,0
+SYM_FUNC_END(KernelSPE)

 #endif /* CONFIG_SPE */

 /*
...
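Compared with a bare "KernelSPE:" label, the SYM_FUNC_START_LOCAL()/SYM_FUNC_END() pair from include/linux/linkage.h emits a local function symbol with proper type and size annotations, which tools that walk kernel symbols (objtool, kallsyms) depend on.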
@@ -50,16 +50,18 @@ static inline bool exit_must_hard_disable(void)
  */
 static notrace __always_inline bool prep_irq_for_enabled_exit(bool restartable)
 {
+	bool must_hard_disable = (exit_must_hard_disable() || !restartable);
+
 	/* This must be done with RI=1 because tracing may touch vmaps */
 	trace_hardirqs_on();

-	if (exit_must_hard_disable() || !restartable)
+	if (must_hard_disable)
 		__hard_EE_RI_disable();

 #ifdef CONFIG_PPC64
 	/* This pattern matches prep_irq_for_idle */
 	if (unlikely(lazy_irq_pending_nocheck())) {
-		if (exit_must_hard_disable() || !restartable) {
+		if (must_hard_disable) {
 			local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
 			__hard_RI_enable();
 		}
...
@@ -238,7 +238,7 @@ static void __do_irq(struct pt_regs *regs, unsigned long oldsp)
 	irq = static_call(ppc_get_irq)();

 	/* We can hard enable interrupts now to allow perf interrupts */
-	if (should_hard_irq_enable())
+	if (should_hard_irq_enable(regs))
 		do_hard_irq_enable();

 	/* And finally process it */
...
@@ -515,7 +515,7 @@ DEFINE_INTERRUPT_HANDLER_ASYNC(timer_interrupt)
 	}

 	/* Conditionally hard-enable interrupts. */
-	if (should_hard_irq_enable()) {
+	if (should_hard_irq_enable(regs)) {
 		/*
 		 * Ensure a positive value is written to the decrementer, or
 		 * else some CPUs will continue to take decrementer exceptions.
...
@@ -26,6 +26,7 @@
 #include <asm/firmware.h>
 #include <asm/kexec_ranges.h>
 #include <asm/crashdump-ppc64.h>
+#include <asm/mmzone.h>
 #include <asm/prom.h>

 struct umem_info {
@@ -989,10 +990,13 @@ unsigned int kexec_extra_fdt_size_ppc64(struct kimage *image)
 	 * linux,drconf-usable-memory properties. Get an approximate on the
 	 * number of usable memory entries and use for FDT size estimation.
 	 */
-	usm_entries = ((memblock_end_of_DRAM() / drmem_lmb_size()) +
-		       (2 * (resource_size(&crashk_res) / drmem_lmb_size())));
-
-	extra_size = (unsigned int)(usm_entries * sizeof(u64));
+	if (drmem_lmb_size()) {
+		usm_entries = ((memory_hotplug_max() / drmem_lmb_size()) +
+			       (2 * (resource_size(&crashk_res) / drmem_lmb_size())));
+		extra_size = (unsigned int)(usm_entries * sizeof(u64));
+	} else {
+		extra_size = 0;
+	}

 	/*
 	 * Get the number of CPU nodes in the current DT. This allows to
...
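As a worked example with hypothetical numbers: for memory_hotplug_max() = 64GB, a 256MB LMB size and a 512MB crashkernel region,

	usm_entries = 64G/256M + 2 * (512M/256M) = 256 + 4 = 260
	extra_size  = 260 * sizeof(u64) = 2080 bytes

The drmem_lmb_size() check also avoids a division by zero on systems that expose no dynamic-reconfiguration (drconf) memory at all.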
@@ -912,16 +912,15 @@ static int kvmppc_handle_debug(struct kvm_vcpu *vcpu)
 static void kvmppc_fill_pt_regs(struct pt_regs *regs)
 {
-	ulong r1, ip, msr, lr;
+	ulong r1, msr, lr;

 	asm("mr %0, 1" : "=r"(r1));
 	asm("mflr %0" : "=r"(lr));
 	asm("mfmsr %0" : "=r"(msr));
-	asm("bl 1f; 1: mflr %0" : "=r"(ip));

 	memset(regs, 0, sizeof(*regs));
 	regs->gpr[1] = r1;
-	regs->nip = ip;
+	regs->nip = _THIS_IP_;
 	regs->msr = msr;
 	regs->link = lr;
 }
...
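_THIS_IP_ replaces the "bl 1f; 1: mflr" trick for reading the current instruction address; the branch is an unannotated intra-function call (the sort of thing objtool warns about) and it clobbers the link register behind the compiler's back. The macro is generic kernel infrastructure built on a GNU C local label, roughly:

#define _THIS_IP_  ({ __label__ __here; __here: (unsigned long)&&__here; })

so regs->nip still points into kvmppc_fill_pt_regs(), with no extra branch and no LR side effects.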
@@ -1012,7 +1012,7 @@ static void __init hash_init_partition_table(phys_addr_t hash_table,
 void hpt_clear_stress(void);
 static struct timer_list stress_hpt_timer;

-void stress_hpt_timer_fn(struct timer_list *timer)
+static void stress_hpt_timer_fn(struct timer_list *timer)
 {
 	int next_cpu;
...
@@ -234,6 +234,14 @@ void radix__mark_rodata_ro(void)
 	end = (unsigned long)__end_rodata;

 	radix__change_memory_range(start, end, _PAGE_WRITE);
+
+	for (start = PAGE_OFFSET; start < (unsigned long)_stext; start += PAGE_SIZE) {
+		end = start + PAGE_SIZE;
+		if (overlaps_interrupt_vector_text(start, end))
+			radix__change_memory_range(start, end, _PAGE_WRITE);
+		else
+			break;
+	}
 }

 void radix__mark_initmem_nx(void)
@@ -262,6 +270,22 @@ print_mapping(unsigned long start, unsigned long end, unsigned long size, bool e
 static unsigned long next_boundary(unsigned long addr, unsigned long end)
 {
 #ifdef CONFIG_STRICT_KERNEL_RWX
+	unsigned long stext_phys;
+
+	stext_phys = __pa_symbol(_stext);
+
+	// Relocatable kernel running at non-zero real address
+	if (stext_phys != 0) {
+		// The end of interrupts code at zero is a rodata boundary
+		unsigned long end_intr = __pa_symbol(__end_interrupts) - stext_phys;
+
+		if (addr < end_intr)
+			return end_intr;
+
+		// Start of relocated kernel text is a rodata boundary
+		if (addr < stext_phys)
+			return stext_phys;
+	}
+
 	if (addr < __pa_symbol(__srwx_boundary))
 		return __pa_symbol(__srwx_boundary);
 #endif
...
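To make the new boundaries concrete, here is a hypothetical walk of next_boundary() for a relocatable kernel loaded at a 32MB real address (illustrative numbers, not from the patch):

/*
 * stext_phys = 0x2000000 (32MB), end_intr = 0x10000:
 *
 *   addr = 0x0        -> returns 0x10000     (end of interrupt text at real 0)
 *   addr = 0x10000    -> returns 0x2000000   (start of relocated kernel text)
 *   addr = 0x2000000  -> returns __pa_symbol(__srwx_boundary)
 *
 * No linear-map mapping crosses one of these points, so the copied
 * interrupt vectors at zero and the relocated text can be given
 * different protections, matching the radix__mark_rodata_ro() loop above.
 */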