Commit f20730ef authored by Linus Torvalds

Merge tag 'smp-core-2023-04-27' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull SMP cross-CPU function-call updates from Ingo Molnar:

 - Remove diagnostics and adjust config for CSD lock diagnostics

 - Add a generic IPI-sending tracepoint, as currently there's no easy
   way to instrument IPI origins: it's arch dependent and for some major
   architectures it's not even consistently available.

* tag 'smp-core-2023-04-27' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  trace,smp: Trace all smp_function_call*() invocations
  trace: Add trace_ipi_send_cpu()
  sched, smp: Trace smp callback causing an IPI
  smp: reword smp call IPI comment
  treewide: Trace IPIs sent via smp_send_reschedule()
  irq_work: Trace self-IPIs sent via arch_irq_work_raise()
  smp: Trace IPIs sent via arch_send_call_function_ipi_mask()
  sched, smp: Trace IPIs sent via send_call_function_single_ipi()
  trace: Add trace_ipi_send_cpumask()
  kernel/smp: Make csdlock_debug= resettable
  locking/csd_lock: Remove per-CPU data indirection from CSD lock debugging
  locking/csd_lock: Remove added data from CSD lock debugging
  locking/csd_lock: Add Kconfig option for csd_debug default
parents 586b222d 5c312497
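
The common shape of the new instrumentation is easiest to see up front. Every send path touched below funnels into one of two tracepoints; a rough sketch of the call shape (the exact call sites are in the hunks that follow):

        /* single-target send, e.g. a reschedule IPI or an irq_work self-IPI */
        trace_ipi_send_cpu(cpu, _RET_IP_, callback);

        /* multi-target send, e.g. arch_send_call_function_ipi_mask() */
        trace_ipi_send_cpumask(mask, _RET_IP_, callback);

_RET_IP_ records the sending call site; callback is the function the IPI will run on the target, or NULL when it cannot be named (the inline scheduler_ipi() case below).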
......
@@ -912,15 +912,14 @@
 	cs89x0_media=	[HW,NET]
 			Format: { rj45 | aui | bnc }
 
-	csdlock_debug=	[KNL] Enable debug add-ons of cross-CPU function call
-			handling. When switched on, additional debug data is
-			printed to the console in case a hanging CPU is
-			detected, and that CPU is pinged again in order to try
-			to resolve the hang situation.
-			0: disable csdlock debugging (default)
-			1: enable basic csdlock debugging (minor impact)
-			ext: enable extended csdlock debugging (more impact,
-			     but more data)
+	csdlock_debug=	[KNL] Enable or disable debug add-ons of cross-CPU
+			function call handling. When switched on,
+			additional debug data is printed to the console
+			in case a hanging CPU is detected, and that
+			CPU is pinged again in order to try to resolve
+			the hang situation. The default value of this
+			option depends on the CSD_LOCK_WAIT_DEBUG_DEFAULT
+			Kconfig option.
 
 	dasd=		[HW,NET]
 			See header of drivers/s390/block/dasd_devmap.c.
......
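
For illustration, the accepted values are unchanged by this series; only the default became configurable. Example uses of the parameter, with the values documented above:

        csdlock_debug=1      basic debugging, minor impact
        csdlock_debug=ext    extended debugging, more impact but more data
        csdlock_debug=0      explicitly disable, overriding a Kconfig-selected default

The last form is the point of "kernel/smp: Make csdlock_debug= resettable": a later csdlock_debug= on the command line can now switch debugging back off instead of only ever enabling it.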
......
@@ -562,7 +562,7 @@ handle_ipi(struct pt_regs *regs)
 }
 
 void
-smp_send_reschedule(int cpu)
+arch_smp_send_reschedule(int cpu)
 {
 #ifdef DEBUG_IPI_MSG
 	if (cpu == hard_smp_processor_id())
......
......
@@ -292,7 +292,7 @@ static void ipi_send_msg(const struct cpumask *callmap, enum ipi_msg_type msg)
 		ipi_send_msg_one(cpu, msg);
 }
 
-void smp_send_reschedule(int cpu)
+void arch_smp_send_reschedule(int cpu)
 {
 	ipi_send_msg_one(cpu, IPI_RESCHEDULE);
 }
......
......
@@ -48,7 +48,6 @@
 #include <asm/mach/arch.h>
 #include <asm/mpu.h>
 
-#define CREATE_TRACE_POINTS
 #include <trace/events/ipi.h>
 
 /*
......
@@ -749,7 +748,7 @@ void __init set_smp_ipi_range(int ipi_base, int n)
 	ipi_setup(smp_processor_id());
 }
 
-void smp_send_reschedule(int cpu)
+void arch_smp_send_reschedule(int cpu)
 {
 	smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
 }
......
......
@@ -20,6 +20,8 @@
 #include <asm/smp_plat.h>
 #include <asm/smp_scu.h>
 
+#include <trace/events/ipi.h>
+
 #define OWL_CPU1_ADDR 0x50
 #define OWL_CPU1_FLAG 0x5c
......
......
@@ -51,7 +51,6 @@
 #include <asm/ptrace.h>
 #include <asm/virt.h>
 
-#define CREATE_TRACE_POINTS
 #include <trace/events/ipi.h>
 
 DEFINE_PER_CPU_READ_MOSTLY(int, cpu_number);
......
@@ -979,7 +978,7 @@ void __init set_smp_ipi_range(int ipi_base, int n)
 	ipi_setup(smp_processor_id());
 }
 
-void smp_send_reschedule(int cpu)
+void arch_smp_send_reschedule(int cpu)
 {
 	smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
 }
......
......
@@ -140,7 +140,7 @@ void smp_send_stop(void)
 	on_each_cpu(ipi_stop, NULL, 1);
 }
 
-void smp_send_reschedule(int cpu)
+void arch_smp_send_reschedule(int cpu)
 {
 	send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
 }
......
......
@@ -217,7 +217,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 	}
 }
 
-void smp_send_reschedule(int cpu)
+void arch_smp_send_reschedule(int cpu)
 {
 	send_ipi(cpumask_of(cpu), IPI_RESCHEDULE);
 }
......
......
@@ -220,11 +220,11 @@ kdump_smp_send_init(void)
  * Called with preemption disabled.
  */
 void
-smp_send_reschedule (int cpu)
+arch_smp_send_reschedule (int cpu)
 {
 	ia64_send_ipi(cpu, IA64_IPI_RESCHEDULE, IA64_IPI_DM_INT, 0);
 }
-EXPORT_SYMBOL_GPL(smp_send_reschedule);
+EXPORT_SYMBOL_GPL(arch_smp_send_reschedule);
 
 /*
  * Called with preemption disabled.
......
......
@@ -155,11 +155,11 @@ void loongson_send_ipi_mask(const struct cpumask *mask, unsigned int action)
  * it goes straight through and wastes no time serializing
  * anything. Worst case is that we lose a reschedule ...
  */
-void smp_send_reschedule(int cpu)
+void arch_smp_send_reschedule(int cpu)
 {
 	loongson_send_ipi_single(cpu, SMP_RESCHEDULE);
 }
-EXPORT_SYMBOL_GPL(smp_send_reschedule);
+EXPORT_SYMBOL_GPL(arch_smp_send_reschedule);
 
 irqreturn_t loongson_ipi_interrupt(int irq, void *dev)
 {
......
......
@@ -66,7 +66,7 @@ extern void calculate_cpu_foreign_map(void);
  * it goes straight through and wastes no time serializing
  * anything. Worst case is that we lose a reschedule ...
  */
-static inline void smp_send_reschedule(int cpu)
+static inline void arch_smp_send_reschedule(int cpu)
 {
 	extern const struct plat_smp_ops *mp_ops;	/* private */
......
......
@@ -173,7 +173,7 @@ void handle_IPI(unsigned int ipi_msg)
 	}
 }
 
-void smp_send_reschedule(int cpu)
+void arch_smp_send_reschedule(int cpu)
 {
 	smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
 }
......
......
@@ -246,8 +246,8 @@ void kgdb_roundup_cpus(void)
 inline void
 smp_send_stop(void)	{ send_IPI_allbutself(IPI_CPU_STOP); }
 
 void
-smp_send_reschedule(int cpu) { send_IPI_single(cpu, IPI_RESCHEDULE); }
+arch_smp_send_reschedule(int cpu) { send_IPI_single(cpu, IPI_RESCHEDULE); }
 
 void
 smp_send_all_nop(void)
......
......
@@ -61,6 +61,8 @@
 #include <asm/kup.h>
 #include <asm/fadump.h>
 
+#include <trace/events/ipi.h>
+
 #ifdef DEBUG
 #include <asm/udbg.h>
 #define DBG(fmt...) udbg_printf(fmt)
......
@@ -364,12 +366,12 @@ static inline void do_message_pass(int cpu, int msg)
 #endif
 }
 
-void smp_send_reschedule(int cpu)
+void arch_smp_send_reschedule(int cpu)
 {
 	if (likely(smp_ops))
 		do_message_pass(cpu, PPC_MSG_RESCHEDULE);
 }
-EXPORT_SYMBOL_GPL(smp_send_reschedule);
+EXPORT_SYMBOL_GPL(arch_smp_send_reschedule);
 
 void arch_send_call_function_single_ipi(int cpu)
 {
......
......
@@ -43,6 +43,7 @@
 #include <linux/compiler.h>
 #include <linux/of.h>
 #include <linux/irqdomain.h>
+#include <linux/smp.h>
 
 #include <asm/ftrace.h>
 #include <asm/reg.h>
......
@@ -80,6 +81,8 @@
 #include <asm/dtl.h>
 #include <asm/plpar_wrappers.h>
 
+#include <trace/events/ipi.h>
+
 #include "book3s.h"
 #include "book3s_hv.h"
......
......
@@ -20,6 +20,8 @@
 #include <asm/opal.h>
 #include <asm/smp.h>
 
+#include <trace/events/ipi.h>
+
 #include "subcore.h"
 #include "powernv.h"
......
......
@@ -333,8 +333,8 @@ bool smp_crash_stop_failed(void)
 }
 #endif
 
-void smp_send_reschedule(int cpu)
+void arch_smp_send_reschedule(int cpu)
 {
 	send_ipi_single(cpu, IPI_RESCHEDULE);
 }
-EXPORT_SYMBOL_GPL(smp_send_reschedule);
+EXPORT_SYMBOL_GPL(arch_smp_send_reschedule);
......
@@ -553,7 +553,7 @@ void arch_send_call_function_single_ipi(int cpu)
  * it goes straight through and wastes no time serializing
  * anything. Worst case is that we lose a reschedule ...
  */
-void smp_send_reschedule(int cpu)
+void arch_smp_send_reschedule(int cpu)
 {
 	pcpu_ec_call(pcpu_devices + cpu, ec_schedule);
 }
......
......
@@ -256,7 +256,7 @@ void __init smp_cpus_done(unsigned int max_cpus)
 		(bogosum / (5000/HZ)) % 100);
 }
 
-void smp_send_reschedule(int cpu)
+void arch_smp_send_reschedule(int cpu)
 {
 	mp_ops->send_ipi(cpu, SMP_MSG_RESCHEDULE);
 }
......
......
@@ -120,7 +120,7 @@ void cpu_panic(void)
 
 struct linux_prom_registers smp_penguin_ctable = { 0 };
 
-void smp_send_reschedule(int cpu)
+void arch_smp_send_reschedule(int cpu)
 {
 	/*
 	 * CPU model dependent way of implementing IPI generation targeting
......
......
@@ -1430,7 +1430,7 @@ static unsigned long send_cpu_poke(int cpu)
 	return hv_err;
 }
 
-void smp_send_reschedule(int cpu)
+void arch_smp_send_reschedule(int cpu)
 {
 	if (cpu == smp_processor_id()) {
 		WARN_ON_ONCE(preemptible());
......
......
@@ -99,7 +99,7 @@ static inline void __noreturn play_dead(void)
 	BUG();
 }
 
-static inline void smp_send_reschedule(int cpu)
+static inline void arch_smp_send_reschedule(int cpu)
 {
 	smp_ops.smp_send_reschedule(cpu);
 }
......
......
@@ -27,6 +27,7 @@
 #include <linux/swap.h>
 #include <linux/rwsem.h>
 #include <linux/cc_platform.h>
+#include <linux/smp.h>
 
 #include <asm/apic.h>
 #include <asm/perf_event.h>
......
@@ -41,6 +42,9 @@
 #include <asm/fpu/api.h>
 #include <asm/virtext.h>
 
+#include <trace/events/ipi.h>
+
 #include "trace.h"
 
 #include "svm.h"
......
......
@@ -60,7 +60,9 @@
 #include <linux/mem_encrypt.h>
 #include <linux/entry-kvm.h>
 #include <linux/suspend.h>
+#include <linux/smp.h>
 
+#include <trace/events/ipi.h>
 #include <trace/events/kvm.h>
 
 #include <asm/debugreg.h>
......
......
@@ -391,7 +391,7 @@ void arch_send_call_function_single_ipi(int cpu)
 	send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC);
 }
 
-void smp_send_reschedule(int cpu)
+void arch_smp_send_reschedule(int cpu)
 {
 	send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
 }
......
......
@@ -125,8 +125,15 @@ extern void smp_send_stop(void);
 /*
  * sends a 'reschedule' event to another CPU:
  */
-extern void smp_send_reschedule(int cpu);
+extern void arch_smp_send_reschedule(int cpu);
+/*
+ * scheduler_ipi() is inline so can't be passed as callback reason, but the
+ * callsite IP should be sufficient for root-causing IPIs sent from here.
+ */
+#define smp_send_reschedule(cpu) ({		  \
+	trace_ipi_send_cpu(cpu, _RET_IP_, NULL);  \
+	arch_smp_send_reschedule(cpu);		  \
+})
 
 /*
  * Prepare machine for booting other CPUs.
......
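
One detail worth calling out: smp_send_reschedule() becomes a macro rather than a wrapper function so that _RET_IP_ is evaluated in the expanding function, making the recorded callsite point at the code path that requested the IPI instead of at a common helper. A minimal, hypothetical illustration (kick_cpu() is not a real kernel function):

        static void kick_cpu(int cpu)
        {
                /*
                 * Expands to:
                 *   trace_ipi_send_cpu(cpu, _RET_IP_, NULL);
                 *   arch_smp_send_reschedule(cpu);
                 * The callback is NULL because scheduler_ipi() is inline.
                 */
                smp_send_reschedule(cpu);
        }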
......
@@ -35,6 +35,50 @@ TRACE_EVENT(ipi_raise,
 	TP_printk("target_mask=%s (%s)", __get_bitmask(target_cpus), __entry->reason)
 );
 
+TRACE_EVENT(ipi_send_cpu,
+
+	TP_PROTO(const unsigned int cpu, unsigned long callsite, void *callback),
+
+	TP_ARGS(cpu, callsite, callback),
+
+	TP_STRUCT__entry(
+		__field(unsigned int, cpu)
+		__field(void *, callsite)
+		__field(void *, callback)
+	),
+
+	TP_fast_assign(
+		__entry->cpu = cpu;
+		__entry->callsite = (void *)callsite;
+		__entry->callback = callback;
+	),
+
+	TP_printk("cpu=%u callsite=%pS callback=%pS",
+		  __entry->cpu, __entry->callsite, __entry->callback)
+);
+
+TRACE_EVENT(ipi_send_cpumask,
+
+	TP_PROTO(const struct cpumask *cpumask, unsigned long callsite, void *callback),
+
+	TP_ARGS(cpumask, callsite, callback),
+
+	TP_STRUCT__entry(
+		__cpumask(cpumask)
+		__field(void *, callsite)
+		__field(void *, callback)
+	),
+
+	TP_fast_assign(
+		__assign_cpumask(cpumask, cpumask_bits(cpumask));
+		__entry->callsite = (void *)callsite;
+		__entry->callback = callback;
+	),
+
+	TP_printk("cpumask=%s callsite=%pS callback=%pS",
+		  __get_cpumask(cpumask), __entry->callsite, __entry->callback)
+);
+
 DECLARE_EVENT_CLASS(ipi_handler,
 
 	TP_PROTO(const char *reason),
......
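
Because kernel/sched/core.c exports these tracepoints (see the EXPORT_TRACEPOINT_SYMBOL_GPL hunk below), a module can attach its own probe in addition to using the usual tracefs event. A minimal sketch of such a module, relying only on the standard tracepoint probe convention (TP_PROTO prefixed with a void *data argument):

        #include <linux/module.h>
        #include <linux/tracepoint.h>
        #include <trace/events/ipi.h>

        /* Probe: log every single-CPU IPI send with its origin and payload. */
        static void probe_ipi_send_cpu(void *data, const unsigned int cpu,
                                       unsigned long callsite, void *callback)
        {
                pr_info("IPI to CPU%u from %pS, callback %pS\n",
                        cpu, (void *)callsite, callback);
        }

        static int __init ipi_probe_init(void)
        {
                return register_trace_ipi_send_cpu(probe_ipi_send_cpu, NULL);
        }

        static void __exit ipi_probe_exit(void)
        {
                unregister_trace_ipi_send_cpu(probe_ipi_send_cpu, NULL);
                tracepoint_synchronize_unregister();
        }

        module_init(ipi_probe_init);
        module_exit(ipi_probe_exit);
        MODULE_LICENSE("GPL");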
......
@@ -22,6 +22,8 @@
 #include <asm/processor.h>
 #include <linux/kasan.h>
 
+#include <trace/events/ipi.h>
+
 static DEFINE_PER_CPU(struct llist_head, raised_list);
 static DEFINE_PER_CPU(struct llist_head, lazy_list);
 static DEFINE_PER_CPU(struct task_struct *, irq_workd);
......
@@ -74,6 +76,14 @@ void __weak arch_irq_work_raise(void)
 	 */
 }
 
+static __always_inline void irq_work_raise(struct irq_work *work)
+{
+	if (trace_ipi_send_cpu_enabled() && arch_irq_work_has_interrupt())
+		trace_ipi_send_cpu(smp_processor_id(), _RET_IP_, work->func);
+
+	arch_irq_work_raise();
+}
+
 /* Enqueue on current CPU, work must already be claimed and preempt disabled */
 static void __irq_work_queue_local(struct irq_work *work)
 {
......
@@ -99,7 +109,7 @@ static void __irq_work_queue_local(struct irq_work *work)
 	/* If the work is "lazy", handle it from next tick if any */
 	if (!lazy_work || tick_nohz_tick_stopped())
-		arch_irq_work_raise();
+		irq_work_raise(work);
 }
 
 /* Enqueue the irq work @work on the current CPU */
......
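
Two details in irq_work_raise() are worth noting. trace_ipi_send_cpu_enabled() is the static-key test generated for the tracepoint, so when the event is disabled the whole block, including the arch_irq_work_has_interrupt() check, costs nothing on the fast path. The arch_irq_work_has_interrupt() test itself keeps the event honest: on architectures that do not raise a real self-interrupt, pending work is picked up from the next tick instead, so no IPI is reported.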
......
@@ -80,6 +80,7 @@
 #define CREATE_TRACE_POINTS
 #include <linux/sched/rseq_api.h>
 #include <trace/events/sched.h>
+#include <trace/events/ipi.h>
 #undef CREATE_TRACE_POINTS
 
 #include "sched.h"
......
@@ -95,6 +96,9 @@
 #include "../../io_uring/io-wq.h"
 #include "../smpboot.h"
 
+EXPORT_TRACEPOINT_SYMBOL_GPL(ipi_send_cpu);
+EXPORT_TRACEPOINT_SYMBOL_GPL(ipi_send_cpumask);
+
 /*
  * Export tracepoints that act as a bare tracehook (ie: have no trace event
  * associated with them) to allow external modules to probe them.
......
@@ -3848,14 +3852,20 @@ void sched_ttwu_pending(void *arg)
 	rq_unlock_irqrestore(rq, &rf);
 }
 
-void send_call_function_single_ipi(int cpu)
+/*
+ * Prepare the scene for sending an IPI for a remote smp_call
+ *
+ * Returns true if the caller can proceed with sending the IPI.
+ * Returns false otherwise.
+ */
+bool call_function_single_prep_ipi(int cpu)
 {
-	struct rq *rq = cpu_rq(cpu);
-
-	if (!set_nr_if_polling(rq->idle))
-		arch_send_call_function_single_ipi(cpu);
-	else
+	if (set_nr_if_polling(cpu_rq(cpu)->idle)) {
 		trace_sched_wake_idle_without_ipi(cpu);
+		return false;
+	}
+
+	return true;
 }
 
 /*
......
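
The refactor reads more clearly with set_nr_if_polling() in mind: if the target CPU is idle and polling on need_resched(), setting TIF_NEED_RESCHED is enough, since the CPU will notice the flag on its own and no interrupt is needed; that is the trace_sched_wake_idle_without_ipi() path returning false. Returning true tells the caller it really must send, and the send, together with its tracepoint, now lives with the caller (see the collapsed diff below).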
......
@@ -6,7 +6,7 @@
 extern void sched_ttwu_pending(void *arg);
 
-extern void send_call_function_single_ipi(int cpu);
+extern bool call_function_single_prep_ipi(int cpu);
 
 #ifdef CONFIG_SMP
 extern void flush_smp_call_function_queue(void);
......
This diff is collapsed.
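
The collapsed diff is where call_function_single_prep_ipi() gains its caller. A sketch of the expected shape, inferred from the surrounding changes rather than taken from the collapsed hunk itself, so treat it as illustrative:

        /* sketch: the send half that pairs with call_function_single_prep_ipi() */
        void send_call_function_single_ipi(int cpu)
        {
                if (call_function_single_prep_ipi(cpu)) {
                        trace_ipi_send_cpu(cpu, _RET_IP_,
                                           generic_smp_call_function_single_interrupt);
                        arch_send_call_function_single_ipi(cpu);
                }
        }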
......
@@ -1490,6 +1490,15 @@ config CSD_LOCK_WAIT_DEBUG
 	  include the IPI handler function currently executing (if any)
 	  and relevant stack traces.
 
+config CSD_LOCK_WAIT_DEBUG_DEFAULT
+	bool "Default csd_lock_wait() debugging on at boot time"
+	depends on CSD_LOCK_WAIT_DEBUG
+	depends on 64BIT
+	default n
+	help
+	  This option causes the csdlock_debug= kernel boot parameter to
+	  default to 1 (basic debugging) instead of 0 (no debugging).
+
 endmenu # lock debugging
 
 config TRACE_IRQFLAGS
......
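
For illustration, a configuration that turns basic CSD-lock debugging on by default would contain:

        CONFIG_CSD_LOCK_WAIT_DEBUG=y
        CONFIG_CSD_LOCK_WAIT_DEBUG_DEFAULT=y

With that, booting without any csdlock_debug= parameter behaves like csdlock_debug=1, and an explicit csdlock_debug=0 on the command line switches it back off, which is why the parameter had to become resettable.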
......
@@ -62,11 +62,14 @@
 #include "kvm_mm.h"
 #include "vfio.h"
 
+#include <trace/events/ipi.h>
+
 #define CREATE_TRACE_POINTS
 #include <trace/events/kvm.h>
 
 #include <linux/kvm_dirty_ring.h>
 
 /* Worst case buffer size needed for holding an integer. */
 #define ITOA_MAX_LEN 12
......