Commit 1c7b4bc3 authored by Michael Ellerman

Merge branch 'fixes' into next

Merge our fixes branch to bring in commits that are prerequisites for
further development or would cause conflicts.

parents c7e0d9bb f9bc9bbe
@@ -255,7 +255,7 @@ config PPC
 	select HAVE_KPROBES
 	select HAVE_KPROBES_ON_FTRACE
 	select HAVE_KRETPROBES
-	select HAVE_LD_DEAD_CODE_DATA_ELIMINATION if HAVE_OBJTOOL_MCOUNT
+	select HAVE_LD_DEAD_CODE_DATA_ELIMINATION if HAVE_OBJTOOL_MCOUNT && (!ARCH_USING_PATCHABLE_FUNCTION_ENTRY || (!CC_IS_GCC || GCC_VERSION >= 110100))
 	select HAVE_LIVEPATCH if HAVE_DYNAMIC_FTRACE_WITH_REGS
 	select HAVE_MOD_ARCH_SPECIFIC
 	select HAVE_NMI if PERF_EVENTS || (PPC64 && PPC_BOOK3S)
@@ -910,7 +910,7 @@ config ARCH_FORCE_MAX_ORDER
 	default "6" if PPC32 && PPC_64K_PAGES
 	range 4 10 if PPC32 && PPC_256K_PAGES
 	default "4" if PPC32 && PPC_256K_PAGES
-	range 10 10
+	range 10 12
 	default "10"
 	help
 	  The kernel page allocator limits the size of maximal physically
......
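For context on the `range 10 12` change: with the current (inclusive) MAX_ORDER definition, the buddy allocator's largest block is 2^MAX_ORDER pages, so widening the range lets a config raise the cap on contiguous allocations. A standalone sketch of the arithmetic; the 64K page size is an assumption for illustration only:

#include <stdio.h>

int main(void)
{
	const unsigned long page_size = 64 * 1024;	/* assumed 64K pages */

	/* Largest physically contiguous block is 2^order pages. */
	for (int order = 10; order <= 12; order++)
		printf("MAX_ORDER %d -> %lu MB\n",
		       order, (page_size << order) >> 20);
	return 0;
}

With these assumptions the cap grows from 64 MB at order 10 to 256 MB at order 12.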
@@ -94,6 +94,13 @@ static inline pte_t pte_wrprotect(pte_t pte)

 #define pte_wrprotect pte_wrprotect

+static inline int pte_read(pte_t pte)
+{
+	return (pte_val(pte) & _PAGE_RO) != _PAGE_NA;
+}
+
+#define pte_read pte_read
+
 static inline int pte_write(pte_t pte)
 {
 	return !(pte_val(pte) & _PAGE_RO);
......
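The new 8xx pte_read() exists because a PTE on this platform can be mapped no-access: _PAGE_NA is one encoding of the wider _PAGE_RO permission field, so "not writable" does not imply "readable". A standalone model of the predicate; the bit values are illustrative stand-ins chosen so that _PAGE_NA is a sub-encoding of _PAGE_RO, the real masks live in pte-8xx.h:

#include <assert.h>

#define _PAGE_NA 0x0200		/* assumed: "no access" encoding */
#define _PAGE_RO 0x0600		/* assumed: two-bit permission field */

static int pte_read(unsigned long pte)
{
	/* Readable unless the permission field decodes to "no access". */
	return (pte & _PAGE_RO) != _PAGE_NA;
}

int main(void)
{
	assert(pte_read(0x0000));	/* read-write page */
	assert(pte_read(_PAGE_RO));	/* read-only page */
	assert(!pte_read(_PAGE_NA));	/* no-access page */
	return 0;
}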
@@ -197,7 +197,7 @@ static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
 {
 	unsigned long old;

-	if (pte_young(*ptep))
+	if (!pte_young(*ptep))
 		return 0;
 	old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0, 0);
 	return (old & _PAGE_ACCESSED) != 0;
......
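The one-character fix above restores the intended early exit: return 0 only when the accessed bit is already clear, otherwise atomically clear it and report that it was set. The old code had the test inverted, so young PTEs were never cleared. A standalone model of the fixed semantics, with an illustrative bit value standing in for _PAGE_ACCESSED:

#include <assert.h>

#define _PAGE_ACCESSED 0x100	/* illustrative value */

static int test_and_clear_young(unsigned long *pte)
{
	unsigned long old;

	if (!(*pte & _PAGE_ACCESSED))	/* already old: nothing to clear */
		return 0;

	old = *pte;
	*pte &= ~_PAGE_ACCESSED;	/* stands in for the atomic pte_update() */
	return (old & _PAGE_ACCESSED) != 0;
}

int main(void)
{
	unsigned long pte = _PAGE_ACCESSED;

	assert(test_and_clear_young(&pte) == 1);	/* was young, now cleared */
	assert(test_and_clear_young(&pte) == 0);	/* stays old */
	return 0;
}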
@@ -25,7 +25,9 @@ static inline int pte_write(pte_t pte)
 	return pte_val(pte) & _PAGE_RW;
 }
 #endif
+#ifndef pte_read
 static inline int pte_read(pte_t pte)		{ return 1; }
+#endif
 static inline int pte_dirty(pte_t pte)		{ return pte_val(pte) & _PAGE_DIRTY; }
 static inline int pte_special(pte_t pte)	{ return pte_val(pte) & _PAGE_SPECIAL; }
 static inline int pte_none(pte_t pte)		{ return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; }
......
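The `#ifndef pte_read` guard works together with the `#define pte_read pte_read` added in the 8xx header above: defining an object-like macro with the function's own name lets the generic header detect the override. A minimal standalone sketch of this function-plus-macro idiom; names and bit values are illustrative:

#include <assert.h>

/* A platform header provides the function and a same-named macro... */
static inline int pte_read(unsigned long pte)
{
	return (pte & 0x0600) != 0x0200;	/* platform-specific check */
}
#define pte_read pte_read

/* ...so the generic header's #ifndef sees the macro and skips the
 * always-true fallback it would otherwise define. */
#ifndef pte_read
static inline int pte_read(unsigned long pte) { return 1; }
#endif

int main(void)
{
	assert(pte_read(0x0600));	/* platform version is in effect */
	assert(!pte_read(0x0200));	/* fallback would have returned 1 */
	return 0;
}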
@@ -137,8 +137,9 @@ ret_from_syscall:
 	lis	r4,icache_44x_need_flush@ha
 	lwz	r5,icache_44x_need_flush@l(r4)
 	cmplwi	cr0,r5,0
-	bne-	2f
+	bne-	.L44x_icache_flush
 #endif /* CONFIG_PPC_47x */
+.L44x_icache_flush_return:
 	kuep_unlock
 	lwz	r4,_LINK(r1)
 	lwz	r5,_CCR(r1)
@@ -172,10 +173,11 @@ syscall_exit_finish:
 	b	1b

 #ifdef CONFIG_44x
-2:	li	r7,0
+.L44x_icache_flush:
+	li	r7,0
 	iccci	r0,r0
 	stw	r7,icache_44x_need_flush@l(r4)
-	b	1b
+	b	.L44x_icache_flush_return
 #endif /* CONFIG_44x */

 	.globl	ret_from_fork
......
@@ -395,7 +395,7 @@ interrupt_base:
 #ifdef CONFIG_PPC_FPU
 	FP_UNAVAILABLE_EXCEPTION
 #else
-	EXCEPTION(0x0800, FP_UNAVAIL, FloatingPointUnavailable, unknown_exception)
+	EXCEPTION(0x0800, FP_UNAVAIL, FloatingPointUnavailable, emulation_assist_interrupt)
 #endif

 	/* System Call Interrupt */
......
@@ -230,13 +230,15 @@ void thread_change_pc(struct task_struct *tsk, struct pt_regs *regs)
 	struct arch_hw_breakpoint *info;
 	int i;

+	preempt_disable();
+
 	for (i = 0; i < nr_wp_slots(); i++) {
 		struct perf_event *bp = __this_cpu_read(bp_per_reg[i]);

 		if (unlikely(bp && counter_arch_bp(bp)->perf_single_step))
 			goto reset;
 	}
-	return;
+	goto out;

 reset:
 	regs_set_return_msr(regs, regs->msr & ~MSR_SE);
@@ -245,6 +247,9 @@ void thread_change_pc(struct task_struct *tsk, struct pt_regs *regs)
 		__set_breakpoint(i, info);
 		info->perf_single_step = false;
 	}
+
+out:
+	preempt_enable();
 }

 static bool is_larx_stcx_instr(int type)
@@ -363,6 +368,11 @@ static void handle_p10dd1_spurious_exception(struct perf_event **bp,
 	}
 }

+/*
+ * Handle a DABR or DAWR exception.
+ *
+ * Called in atomic context.
+ */
 int hw_breakpoint_handler(struct die_args *args)
 {
 	bool err = false;
@@ -490,6 +500,8 @@ NOKPROBE_SYMBOL(hw_breakpoint_handler);

 /*
  * Handle single-step exceptions following a DABR hit.
+ *
+ * Called in atomic context.
  */
 static int single_step_dabr_instruction(struct die_args *args)
 {
@@ -541,6 +553,8 @@ NOKPROBE_SYMBOL(single_step_dabr_instruction);

 /*
  * Handle debug exception notifications.
+ *
+ * Called in atomic context.
  */
 int hw_breakpoint_exceptions_notify(
 		struct notifier_block *unused, unsigned long val, void *data)
......
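The thread_change_pc() change brackets the `__this_cpu_read()` loop with preempt_disable()/preempt_enable(): a per-CPU read is only meaningful while the task cannot migrate, and every use of the result must stay on the same CPU. A minimal kernel-style sketch of the pattern; the per-CPU variable and helper are hypothetical:

#include <linux/percpu.h>
#include <linux/preempt.h>

static DEFINE_PER_CPU(void *, slot);	/* hypothetical per-CPU slot */

static bool this_cpu_slot_in_use(void)
{
	bool in_use;

	/* Without this, the task could migrate between reading the
	 * per-CPU value and acting on it, touching another CPU's state. */
	preempt_disable();
	in_use = __this_cpu_read(slot) != NULL;
	preempt_enable();

	return in_use;
}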
@@ -131,8 +131,13 @@ void wp_get_instr_detail(struct pt_regs *regs, ppc_inst_t *instr,
 			 int *type, int *size, unsigned long *ea)
 {
 	struct instruction_op op;
+	int err;

-	if (__get_user_instr(*instr, (void __user *)regs->nip))
+	pagefault_disable();
+	err = __get_user_instr(*instr, (void __user *)regs->nip);
+	pagefault_enable();
+
+	if (err)
 		return;

 	analyse_instr(&op, regs, *instr);
......
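wp_get_instr_detail() runs in the breakpoint exception path, so it must not sleep faulting in the user page. A minimal kernel-style sketch of the pagefault_disable() pattern the fix applies; the helper name is hypothetical:

#include <linux/types.h>
#include <linux/uaccess.h>

static int peek_user_word(const u32 __user *addr, u32 *val)
{
	int err;

	/* Turn a would-be sleeping fault-in into an immediate error:
	 * __get_user() fails fast with -EFAULT instead of faulting. */
	pagefault_disable();
	err = __get_user(*val, addr);
	pagefault_enable();

	return err;
}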
@@ -73,29 +73,12 @@ int __no_sanitize_address arch_stack_walk_reliable(stack_trace_consume_fn consum
 	bool firstframe;

 	stack_end = stack_page + THREAD_SIZE;
-	if (!is_idle_task(task)) {
-		/*
-		 * For user tasks, this is the SP value loaded on
-		 * kernel entry, see "PACAKSAVE(r13)" in _switch() and
-		 * system_call_common().
-		 *
-		 * Likewise for non-swapper kernel threads,
-		 * this also happens to be the top of the stack
-		 * as setup by copy_thread().
-		 *
-		 * Note that stack backlinks are not properly setup by
-		 * copy_thread() and thus, a forked task() will have
-		 * an unreliable stack trace until it's been
-		 * _switch()'ed to for the first time.
-		 */
-		stack_end -= STACK_USER_INT_FRAME_SIZE;
-	} else {
-		/*
-		 * idle tasks have a custom stack layout,
-		 * c.f. cpu_idle_thread_init().
-		 */
-		stack_end -= STACK_FRAME_MIN_SIZE;
-	}
+
+	// See copy_thread() for details.
+	if (task->flags & PF_KTHREAD)
+		stack_end -= STACK_FRAME_MIN_SIZE;
+	else
+		stack_end -= STACK_USER_INT_FRAME_SIZE;

 	if (task == current)
 		sp = current_stack_frame();
......
@@ -1514,23 +1514,11 @@ static void do_program_check(struct pt_regs *regs)
 			return;
 		}

-		if (cpu_has_feature(CPU_FTR_DEXCR_NPHIE) && user_mode(regs)) {
-			ppc_inst_t insn;
-
-			if (get_user_instr(insn, (void __user *)regs->nip)) {
-				_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
-				return;
-			}
-
-			if (ppc_inst_primary_opcode(insn) == 31 &&
-			    get_xop(ppc_inst_val(insn)) == OP_31_XOP_HASHCHK) {
-				_exception(SIGILL, regs, ILL_ILLOPN, regs->nip);
-				return;
-			}
+		/* User mode considers other cases after enabling IRQs */
+		if (!user_mode(regs)) {
+			_exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
+			return;
 		}
-
-		_exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
-		return;
 	}
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
 	if (reason & REASON_TM) {
@@ -1563,16 +1551,44 @@ static void do_program_check(struct pt_regs *regs)
 	/*
 	 * If we took the program check in the kernel skip down to sending a
-	 * SIGILL. The subsequent cases all relate to emulating instructions
-	 * which we should only do for userspace. We also do not want to enable
-	 * interrupts for kernel faults because that might lead to further
-	 * faults, and loose the context of the original exception.
+	 * SIGILL. The subsequent cases all relate to user space, such as
+	 * emulating instructions which we should only do for user space. We
+	 * also do not want to enable interrupts for kernel faults because that
+	 * might lead to further faults, and loose the context of the original
+	 * exception.
 	 */
 	if (!user_mode(regs))
 		goto sigill;

 	interrupt_cond_local_irq_enable(regs);

+	/*
+	 * (reason & REASON_TRAP) is mostly handled before enabling IRQs,
+	 * except get_user_instr() can sleep so we cannot reliably inspect the
+	 * current instruction in that context. Now that we know we are
+	 * handling a user space trap and can sleep, we can check if the trap
+	 * was a hashchk failure.
+	 */
+	if (reason & REASON_TRAP) {
+		if (cpu_has_feature(CPU_FTR_DEXCR_NPHIE)) {
+			ppc_inst_t insn;
+
+			if (get_user_instr(insn, (void __user *)regs->nip)) {
+				_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
+				return;
+			}
+
+			if (ppc_inst_primary_opcode(insn) == 31 &&
+			    get_xop(ppc_inst_val(insn)) == OP_31_XOP_HASHCHK) {
+				_exception(SIGILL, regs, ILL_ILLOPN, regs->nip);
+				return;
+			}
+		}
+
+		_exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
+		return;
+	}
+
 	/* (reason & REASON_ILLEGAL) would be the obvious thing here,
 	 * but there seems to be a hardware bug on the 405GP (RevD)
 	 * that means ESR is sometimes set incorrectly - either to
......
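The HASHCHK check moves below interrupt_cond_local_irq_enable() because get_user_instr() may fault the user page in, which can sleep, and sleeping is illegal in the early, IRQs-off part of the handler. A minimal kernel-style sketch of the constraint; the helper name is hypothetical, and the explicit might_sleep() only makes visible the check that the user-access helpers already perform internally:

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/uaccess.h>

static int read_user_insn_word(const u32 __user *nip, u32 *word)
{
	/* get_user() may fault the page in and sleep; with
	 * CONFIG_DEBUG_ATOMIC_SLEEP an atomic caller splats here. */
	might_sleep();
	return get_user(*word, nip);
}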
@@ -406,6 +406,9 @@ static __always_inline bool yield_to_prev(struct qspinlock *lock, struct qnode *
 	if ((yield_count & 1) == 0)
 		goto yield_prev; /* owner vcpu is running */

+	if (get_owner_cpu(READ_ONCE(lock->val)) != yield_cpu)
+		goto yield_prev; /* re-sample lock owner */
+
 	spin_end();

 	preempted = true;
......
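The qspinlock change re-reads the lock owner immediately before yielding, because the yield_cpu value sampled earlier can go stale while we were deciding. A standalone toy model of the check-then-act re-sample; the owner-field layout is an assumption for illustration:

#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>

static inline int get_owner_cpu(unsigned int val)
{
	return val & 0xffff;	/* assumed owner-cpu bit field */
}

/* Yield only if the CPU sampled earlier still owns the lock; otherwise
 * we would donate our time slice to the wrong vcpu. */
static bool still_worth_yielding(_Atomic unsigned int *lock, int yield_cpu)
{
	unsigned int val = atomic_load_explicit(lock, memory_order_relaxed);

	return get_owner_cpu(val) == yield_cpu;
}

int main(void)
{
	_Atomic unsigned int lock = 3;	/* owned by cpu 3 */

	assert(still_worth_yielding(&lock, 3));
	atomic_store(&lock, 5);		/* lock changed hands */
	assert(!still_worth_yielding(&lock, 3));
	return 0;
}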
@@ -1212,14 +1212,7 @@ void radix__tlb_flush(struct mmu_gather *tlb)
 			smp_mb(); /* see radix__flush_tlb_mm */
 			exit_flush_lazy_tlbs(mm);
-			_tlbiel_pid(mm->context.id, RIC_FLUSH_ALL);
-
-			/*
-			 * It should not be possible to have coprocessors still
-			 * attached here.
-			 */
-			if (WARN_ON_ONCE(atomic_read(&mm->context.copros) > 0))
-				__flush_all_mm(mm, true);
+			__flush_all_mm(mm, true);
 			preempt_enable();
 		} else {
......
@@ -1418,7 +1418,7 @@ static int h_24x7_event_init(struct perf_event *event)
 	}

 	domain = event_get_domain(event);
-	if (domain >= HV_PERF_DOMAIN_MAX) {
+	if (domain == 0 || domain >= HV_PERF_DOMAIN_MAX) {
 		pr_devel("invalid domain %d\n", domain);
 		return -EINVAL;
 	}
......
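The hv-24x7 fix tightens the domain check: valid domains are 1-based, so a plain upper-bound test wrongly accepts 0. A standalone model of the corrected range check; the limit value is illustrative, standing in for HV_PERF_DOMAIN_MAX:

#include <assert.h>

enum { DOMAIN_MAX = 7 };	/* stand-in for HV_PERF_DOMAIN_MAX */

static int domain_is_valid(unsigned int domain)
{
	/* Domains are 1-based: reject 0 as well as out-of-range values. */
	return domain != 0 && domain < DOMAIN_MAX;
}

int main(void)
{
	assert(!domain_is_valid(0));		/* newly rejected by the fix */
	assert(domain_is_valid(1));
	assert(!domain_is_valid(DOMAIN_MAX));
	return 0;
}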
@@ -2,6 +2,7 @@
 menuconfig PPC_82xx
 	bool "82xx-based boards (PQ II)"
 	depends on PPC_BOOK3S_32
+	select FSL_SOC

 if PPC_82xx
@@ -9,7 +10,6 @@ config EP8248E
 	bool "Embedded Planet EP8248E (a.k.a. CWH-PPC-8248N-VE)"
 	select CPM2
 	select PPC_INDIRECT_PCI if PCI
-	select FSL_SOC
 	select PHYLIB if NETDEVICES
 	select MDIO_BITBANG if PHYLIB
 	help
@@ -22,7 +22,6 @@ config MGCOGE
 	bool "Keymile MGCOGE"
 	select CPM2
 	select PPC_INDIRECT_PCI if PCI
-	select FSL_SOC
 	help
 	  This enables support for the Keymile MGCOGE board.
......
@@ -184,9 +184,6 @@ _GLOBAL_TOC(plpar_hcall)
 plpar_hcall_trace:
 	HCALL_INST_PRECALL(R5)

-	std	r4,STK_PARAM(R4)(r1)
-	mr	r0,r4
-
 	mr	r4,r5
 	mr	r5,r6
 	mr	r6,r7
@@ -196,7 +193,7 @@ plpar_hcall_trace:
 	HVSC

-	ld	r12,STK_PARAM(R4)(r1)
+	ld	r12,STACK_FRAME_MIN_SIZE+STK_PARAM(R4)(r1)
 	std	r4,0(r12)
 	std	r5,8(r12)
 	std	r6,16(r12)
@@ -296,9 +293,6 @@ _GLOBAL_TOC(plpar_hcall9)
 plpar_hcall9_trace:
 	HCALL_INST_PRECALL(R5)

-	std	r4,STK_PARAM(R4)(r1)
-	mr	r0,r4
-
 	mr	r4,r5
 	mr	r5,r6
 	mr	r6,r7
......
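The `STACK_FRAME_MIN_SIZE+STK_PARAM(R4)` change accounts for the stack frame the tracing prologue pushes: a slot saved relative to the old stack pointer must be reloaded with the new frame's size added once r1 has moved. A standalone toy model of that offset arithmetic; the frame and slot sizes are illustrative stand-ins for STACK_FRAME_MIN_SIZE and STK_PARAM(R4):

#include <assert.h>

enum { FRAME = 112, OFF = 48 };	/* assumed frame size and slot offset */

int main(void)
{
	unsigned char stack[512];
	unsigned char *sp = stack + 256;

	sp[OFF] = 0xab;		/* caller saves a slot relative to old sp */
	sp -= FRAME;		/* callee pushes a new minimal frame */

	/* After the push, the old slot sits FRAME bytes further out. */
	assert(sp[FRAME + OFF] == 0xab);
	return 0;
}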
@@ -59,12 +59,11 @@ override define INSTALL_RULE
 	done;
 endef

-override define EMIT_TESTS
+emit_tests:
 	+@for TARGET in $(SUB_DIRS); do \
 		BUILD_TARGET=$(OUTPUT)/$$TARGET;	\
-		$(MAKE) OUTPUT=$$BUILD_TARGET -s -C $$TARGET emit_tests;\
+		$(MAKE) OUTPUT=$$BUILD_TARGET -s -C $$TARGET $@;\
 	done;
-endef

 override define CLEAN
 	+@for TARGET in $(SUB_DIRS); do \
@@ -77,4 +76,4 @@ endef
 tags:
 	find . -name '*.c' -o -name '*.h' | xargs ctags

-.PHONY: tags $(SUB_DIRS)
+.PHONY: tags $(SUB_DIRS) emit_tests
@@ -30,13 +30,14 @@ override define RUN_TESTS
 	+TARGET=event_code_tests; BUILD_TARGET=$$OUTPUT/$$TARGET; $(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET run_tests
 endef

-DEFAULT_EMIT_TESTS := $(EMIT_TESTS)
-
-override define EMIT_TESTS
-	$(DEFAULT_EMIT_TESTS)
+emit_tests:
+	for TEST in $(TEST_GEN_PROGS); do \
+		BASENAME_TEST=`basename $$TEST`;	\
+		echo "$(COLLECTION):$$BASENAME_TEST";	\
+	done
 	+TARGET=ebb; BUILD_TARGET=$$OUTPUT/$$TARGET; $(MAKE) OUTPUT=$$BUILD_TARGET -s -C $$TARGET emit_tests
 	+TARGET=sampling_tests; BUILD_TARGET=$$OUTPUT/$$TARGET; $(MAKE) OUTPUT=$$BUILD_TARGET -s -C $$TARGET emit_tests
 	+TARGET=event_code_tests; BUILD_TARGET=$$OUTPUT/$$TARGET; $(MAKE) OUTPUT=$$BUILD_TARGET -s -C $$TARGET emit_tests
-endef

 DEFAULT_INSTALL_RULE := $(INSTALL_RULE)
 override define INSTALL_RULE
@@ -64,4 +65,4 @@ sampling_tests:
 event_code_tests:
 	TARGET=$@; BUILD_TARGET=$$OUTPUT/$$TARGET; mkdir -p $$BUILD_TARGET; $(MAKE) OUTPUT=$$BUILD_TARGET -k -C $$TARGET all

-.PHONY: all run_tests ebb sampling_tests event_code_tests
+.PHONY: all run_tests ebb sampling_tests event_code_tests emit_tests