Commit ad4e25a3 authored by Paul E. McKenney

Merge branches 'doc.2017.10.20a', 'fixes.2017.10.19a', 'stall.2017.10.09a' and 'torture.2017.10.09a' into HEAD

doc.2017.10.20a: Documentation updates.
fixes.2017.10.19a: Miscellaneous fixes.
stall.2017.10.09a: RCU CPU stall-warning updates.
torture.2017.10.09a: Torture-test updates.
@@ -3539,6 +3539,9 @@
 	rcutorture.stall_cpu_holdoff= [KNL]
 			Time to wait (s) after boot before inducing stall.

+	rcutorture.stall_cpu_irqsoff= [KNL]
+			Disable interrupts while stalling if set.
+
 	rcutorture.stat_interval= [KNL]
 			Time (s) between statistics printk()s.
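For illustration only (values arbitrary, not part of this commit), the new knob combines with the existing stall parameters on the kernel boot line:

	rcutorture.stall_cpu=22 rcutorture.stall_cpu_holdoff=30 rcutorture.stall_cpu_irqsoff=1

This waits 30 seconds after boot, then stalls a CPU for 22 seconds with interrupts disabled, exercising stall detection for irq-disabled CPUs.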
...
@@ -33,10 +33,7 @@ void init_irq_work(struct irq_work *work, void (*func)(struct irq_work *))
 #define DEFINE_IRQ_WORK(name, _f) struct irq_work name = { .func = (_f), }

 bool irq_work_queue(struct irq_work *work);
-
-#ifdef CONFIG_SMP
 bool irq_work_queue_on(struct irq_work *work, int cpu);
-#endif

 void irq_work_tick(void);
 void irq_work_sync(struct irq_work *work);
...
@@ -56,7 +56,6 @@ void __weak arch_irq_work_raise(void)
 	 */
 }

-#ifdef CONFIG_SMP
 /*
  * Enqueue the irq_work @work on @cpu unless it's already pending
  * somewhere.
@@ -68,6 +67,8 @@ bool irq_work_queue_on(struct irq_work *work, int cpu)
 	/* All work should have been flushed before going offline */
 	WARN_ON_ONCE(cpu_is_offline(cpu));

+#ifdef CONFIG_SMP
+
 	/* Arch remote IPI send/receive backend aren't NMI safe */
 	WARN_ON_ONCE(in_nmi());

@@ -78,10 +79,12 @@ bool irq_work_queue_on(struct irq_work *work, int cpu)
 	if (llist_add(&work->llnode, &per_cpu(raised_list, cpu)))
 		arch_send_call_function_single_ipi(cpu);

+#else /* #ifdef CONFIG_SMP */
+	irq_work_queue(work);
+#endif /* #else #ifdef CONFIG_SMP */
+
 	return true;
 }
-EXPORT_SYMBOL_GPL(irq_work_queue_on);
-#endif

 /* Enqueue the irq_work @work on the current CPU */
 bool irq_work_queue(struct irq_work *work)
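With the CONFIG_SMP check moved inside irq_work_queue_on(), callers no longer need their own #ifdef guards; on !SMP builds the request simply maps to irq_work_queue() on the sole CPU. A minimal sketch of a caller (handler and helper names are hypothetical, not part of this commit):

	#include <linux/irq_work.h>
	#include <linux/printk.h>
	#include <linux/smp.h>

	/* Hypothetical handler: runs in IRQ context on the target CPU. */
	static void example_iw_handler(struct irq_work *work)
	{
		pr_info("irq_work ran on CPU %d\n", smp_processor_id());
	}

	static DEFINE_IRQ_WORK(example_iw, example_iw_handler);

	/* Hypothetical helper: now builds and runs on SMP and !SMP alike. */
	static void example_poke_cpu(int cpu)
	{
		irq_work_queue_on(&example_iw, cpu);
	}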
...
@@ -203,6 +203,21 @@ static inline bool __rcu_reclaim(const char *rn, struct rcu_head *head)
 extern int rcu_cpu_stall_suppress;
 int rcu_jiffies_till_stall_check(void);

+#define rcu_ftrace_dump_stall_suppress() \
+do { \
+	if (!rcu_cpu_stall_suppress) \
+		rcu_cpu_stall_suppress = 3; \
+} while (0)
+
+#define rcu_ftrace_dump_stall_unsuppress() \
+do { \
+	if (rcu_cpu_stall_suppress == 3) \
+		rcu_cpu_stall_suppress = 0; \
+} while (0)
+
+#else /* #endif #ifdef CONFIG_RCU_STALL_COMMON */
+#define rcu_ftrace_dump_stall_suppress()
+#define rcu_ftrace_dump_stall_unsuppress()
 #endif /* #ifdef CONFIG_RCU_STALL_COMMON */

 /*
@@ -220,8 +235,12 @@ do { \
 	static atomic_t ___rfd_beenhere = ATOMIC_INIT(0); \
 	\
 	if (!atomic_read(&___rfd_beenhere) && \
-	    !atomic_xchg(&___rfd_beenhere, 1)) \
+	    !atomic_xchg(&___rfd_beenhere, 1)) { \
+		tracing_off(); \
+		rcu_ftrace_dump_stall_suppress(); \
 		ftrace_dump(oops_dump_mode); \
+		rcu_ftrace_dump_stall_unsuppress(); \
+	} \
 } while (0)

 void rcu_early_boot_tests(void);
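The value 3 acts as a sentinel: rcu_ftrace_dump_stall_suppress() quiets stall warnings only if they were not already suppressed, and the unsuppress side restores 0 only if the value is still 3, so a user-requested setting of rcu_cpu_stall_suppress is never clobbered by the dump path. A standalone sketch of the same idiom (names hypothetical):

	/* 0 = warnings enabled, 1 = user-suppressed, 3 = suppressed by dump path. */
	static int stall_suppress;

	static void dump_buffers_quietly(void)
	{
		if (!stall_suppress)
			stall_suppress = 3;	/* The dump itself can take a long time. */
		/* ... long ftrace_dump()-style work here ... */
		if (stall_suppress == 3)
			stall_suppress = 0;	/* Restore only what we set. */
	}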
...
@@ -23,6 +23,7 @@
 #include <linux/types.h>
 #include <linux/kernel.h>
 #include <linux/interrupt.h>
+#include <linux/rcupdate.h>

 #include "rcu_segcblist.h"
...
@@ -51,6 +51,7 @@
 #include <asm/byteorder.h>
 #include <linux/torture.h>
 #include <linux/vmalloc.h>
+#include <linux/sched/debug.h>

 #include "rcu.h"
@@ -89,6 +90,7 @@ torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable.");
 torture_param(int, stall_cpu, 0, "Stall duration (s), zero to disable.");
 torture_param(int, stall_cpu_holdoff, 10,
	      "Time to wait before starting stall (s).");
+torture_param(int, stall_cpu_irqsoff, 0, "Disable interrupts while stalling.");
 torture_param(int, stat_interval, 60,
	      "Number of seconds between stats printk()s");
 torture_param(int, stutter, 5, "Number of seconds to run/halt test");
@@ -1239,6 +1241,7 @@ rcu_torture_stats_print(void)
 	long pipesummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
 	long batchsummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
 	static unsigned long rtcv_snap = ULONG_MAX;
+	static bool splatted;
 	struct task_struct *wtp;

 	for_each_possible_cpu(cpu) {
@@ -1324,6 +1327,10 @@ rcu_torture_stats_print(void)
			 gpnum, completed, flags,
			 wtp == NULL ? ~0UL : wtp->state,
			 wtp == NULL ? -1 : (int)task_cpu(wtp));
+		if (!splatted && wtp) {
+			sched_show_task(wtp);
+			splatted = true;
+		}
		show_rcu_gp_kthreads();
		rcu_ftrace_dump(DUMP_ALL);
 	}
@@ -1357,7 +1364,7 @@ rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, const char *tag)
		 "fqs_duration=%d fqs_holdoff=%d fqs_stutter=%d "
		 "test_boost=%d/%d test_boost_interval=%d "
		 "test_boost_duration=%d shutdown_secs=%d "
-		 "stall_cpu=%d stall_cpu_holdoff=%d "
+		 "stall_cpu=%d stall_cpu_holdoff=%d stall_cpu_irqsoff=%d "
		 "n_barrier_cbs=%d "
		 "onoff_interval=%d onoff_holdoff=%d\n",
		 torture_type, tag, nrealreaders, nfakewriters,
@@ -1365,7 +1372,7 @@ rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, const char *tag)
		 stutter, irqreader, fqs_duration, fqs_holdoff, fqs_stutter,
		 test_boost, cur_ops->can_boost,
		 test_boost_interval, test_boost_duration, shutdown_secs,
-		 stall_cpu, stall_cpu_holdoff,
+		 stall_cpu, stall_cpu_holdoff, stall_cpu_irqsoff,
		 n_barrier_cbs,
		 onoff_interval, onoff_holdoff);
 }
@@ -1430,11 +1437,18 @@ static int rcu_torture_stall(void *args)
 	if (!kthread_should_stop()) {
		stop_at = get_seconds() + stall_cpu;
		/* RCU CPU stall is expected behavior in following code. */
-		pr_alert("rcu_torture_stall start.\n");
		rcu_read_lock();
-		preempt_disable();
+		if (stall_cpu_irqsoff)
+			local_irq_disable();
+		else
+			preempt_disable();
+		pr_alert("rcu_torture_stall start on CPU %d.\n",
+			 smp_processor_id());
		while (ULONG_CMP_LT(get_seconds(), stop_at))
			continue;  /* Induce RCU CPU stall warning. */
-		preempt_enable();
+		if (stall_cpu_irqsoff)
+			local_irq_enable();
+		else
+			preempt_enable();
		rcu_read_unlock();
		pr_alert("rcu_torture_stall end.\n");
...
@@ -534,8 +534,8 @@ module_param(rcu_kick_kthreads, bool, 0644);
  * How long the grace period must be before we start recruiting
  * quiescent-state help from rcu_note_context_switch().
  */
-static ulong jiffies_till_sched_qs = HZ / 20;
-module_param(jiffies_till_sched_qs, ulong, 0644);
+static ulong jiffies_till_sched_qs = HZ / 10;
+module_param(jiffies_till_sched_qs, ulong, 0444);

 static bool rcu_start_gp_advanced(struct rcu_state *rsp, struct rcu_node *rnp,
				  struct rcu_data *rdp);
@@ -837,6 +837,9 @@ static void rcu_eqs_enter(bool user)
  * We crowbar the ->dynticks_nesting field to zero to allow for
  * the possibility of usermode upcalls having messed up our count
  * of interrupt nesting level during the prior busy period.
+ *
+ * If you add or remove a call to rcu_idle_enter(), be sure to test with
+ * CONFIG_RCU_EQS_DEBUG=y.
  */
 void rcu_idle_enter(void)
 {
@@ -852,6 +855,9 @@ void rcu_idle_enter(void)
  * is permitted between this call and rcu_user_exit(). This way the
  * CPU doesn't need to maintain the tick for RCU maintenance purposes
  * when the CPU runs in userspace.
+ *
+ * If you add or remove a call to rcu_user_enter(), be sure to test with
+ * CONFIG_RCU_EQS_DEBUG=y.
  */
 void rcu_user_enter(void)
 {
@@ -875,6 +881,9 @@ void rcu_user_enter(void)
  * Use things like work queues to work around this limitation.
  *
  * You have been warned.
+ *
+ * If you add or remove a call to rcu_irq_exit(), be sure to test with
+ * CONFIG_RCU_EQS_DEBUG=y.
  */
 void rcu_irq_exit(void)
 {
@@ -899,6 +908,9 @@ void rcu_irq_exit(void)
 /*
  * Wrapper for rcu_irq_exit() where interrupts are enabled.
+ *
+ * If you add or remove a call to rcu_irq_exit_irqson(), be sure to test
+ * with CONFIG_RCU_EQS_DEBUG=y.
  */
 void rcu_irq_exit_irqson(void)
 {
@@ -971,6 +983,9 @@ static void rcu_eqs_exit(bool user)
  * allow for the possibility of usermode upcalls messing up our count
  * of interrupt nesting level during the busy period that is just
  * now starting.
+ *
+ * If you add or remove a call to rcu_idle_exit(), be sure to test with
+ * CONFIG_RCU_EQS_DEBUG=y.
  */
 void rcu_idle_exit(void)
 {
@@ -987,6 +1002,9 @@ void rcu_idle_exit(void)
  *
  * Exit RCU idle mode while entering the kernel because it can
  * run a RCU read side critical section anytime.
+ *
+ * If you add or remove a call to rcu_user_exit(), be sure to test with
+ * CONFIG_RCU_EQS_DEBUG=y.
  */
 void rcu_user_exit(void)
 {
@@ -1012,6 +1030,9 @@ void rcu_user_exit(void)
  * Use things like work queues to work around this limitation.
  *
  * You have been warned.
+ *
+ * If you add or remove a call to rcu_irq_enter(), be sure to test with
+ * CONFIG_RCU_EQS_DEBUG=y.
  */
 void rcu_irq_enter(void)
 {
@@ -1037,6 +1058,9 @@ void rcu_irq_enter(void)
 /*
  * Wrapper for rcu_irq_enter() where interrupts are enabled.
+ *
+ * If you add or remove a call to rcu_irq_enter_irqson(), be sure to test
+ * with CONFIG_RCU_EQS_DEBUG=y.
  */
 void rcu_irq_enter_irqson(void)
 {
@@ -1055,6 +1079,9 @@ void rcu_irq_enter_irqson(void)
  * that the CPU is active. This implementation permits nested NMIs, as
  * long as the nesting level does not overflow an int. (You will probably
  * run out of stack space first.)
+ *
+ * If you add or remove a call to rcu_nmi_enter(), be sure to test
+ * with CONFIG_RCU_EQS_DEBUG=y.
  */
 void rcu_nmi_enter(void)
 {
@@ -1087,6 +1114,9 @@ void rcu_nmi_enter(void)
  * RCU-idle period, update rdtp->dynticks and rdtp->dynticks_nmi_nesting
  * to let the RCU grace-period handling know that the CPU is back to
  * being RCU-idle.
+ *
+ * If you add or remove a call to rcu_nmi_exit(), be sure to test
+ * with CONFIG_RCU_EQS_DEBUG=y.
  */
 void rcu_nmi_exit(void)
 {
@@ -1206,6 +1236,22 @@ static int rcu_is_cpu_rrupt_from_idle(void)
 	return __this_cpu_read(rcu_dynticks.dynticks_nesting) <= 1;
 }

+/*
+ * We are reporting a quiescent state on behalf of some other CPU, so
+ * it is our responsibility to check for and handle potential overflow
+ * of the rcu_node ->gpnum counter with respect to the rcu_data counters.
+ * After all, the CPU might be in deep idle state, and thus executing no
+ * code whatsoever.
+ */
+static void rcu_gpnum_ovf(struct rcu_node *rnp, struct rcu_data *rdp)
+{
+	lockdep_assert_held(&rnp->lock);
+	if (ULONG_CMP_LT(READ_ONCE(rdp->gpnum) + ULONG_MAX / 4, rnp->gpnum))
+		WRITE_ONCE(rdp->gpwrap, true);
+	if (ULONG_CMP_LT(rdp->rcu_iw_gpnum + ULONG_MAX / 4, rnp->gpnum))
+		rdp->rcu_iw_gpnum = rnp->gpnum + ULONG_MAX / 4;
+}
+
 /*
  * Snapshot the specified CPU's dynticks counter so that we can later
  * credit them with an implicit quiescent state. Return 1 if this CPU
@@ -1216,14 +1262,33 @@ static int dyntick_save_progress_counter(struct rcu_data *rdp)
 	rdp->dynticks_snap = rcu_dynticks_snap(rdp->dynticks);
 	if (rcu_dynticks_in_eqs(rdp->dynticks_snap)) {
		trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("dti"));
-		if (ULONG_CMP_LT(READ_ONCE(rdp->gpnum) + ULONG_MAX / 4,
-				 rdp->mynode->gpnum))
-			WRITE_ONCE(rdp->gpwrap, true);
+		rcu_gpnum_ovf(rdp->mynode, rdp);
		return 1;
 	}
 	return 0;
 }

+/*
+ * Handler for the irq_work request posted when a grace period has
+ * gone on for too long, but not yet long enough for an RCU CPU
+ * stall warning.  Set state appropriately, but just complain if
+ * there is unexpected state on entry.
+ */
+static void rcu_iw_handler(struct irq_work *iwp)
+{
+	struct rcu_data *rdp;
+	struct rcu_node *rnp;
+
+	rdp = container_of(iwp, struct rcu_data, rcu_iw);
+	rnp = rdp->mynode;
+	raw_spin_lock_rcu_node(rnp);
+	if (!WARN_ON_ONCE(!rdp->rcu_iw_pending)) {
+		rdp->rcu_iw_gpnum = rnp->gpnum;
+		rdp->rcu_iw_pending = false;
+	}
+	raw_spin_unlock_rcu_node(rnp);
+}
+
 /*
  * Return true if the specified CPU has passed through a quiescent
  * state by virtue of being in or having passed through an dynticks
@@ -1235,8 +1300,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
 	unsigned long jtsq;
 	bool *rnhqp;
 	bool *ruqp;
-	unsigned long rjtsc;
-	struct rcu_node *rnp;
+	struct rcu_node *rnp = rdp->mynode;

 	/*
	 * If the CPU passed through or entered a dynticks idle phase with
@@ -1249,34 +1313,25 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
 	if (rcu_dynticks_in_eqs_since(rdp->dynticks, rdp->dynticks_snap)) {
		trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("dti"));
		rdp->dynticks_fqs++;
+		rcu_gpnum_ovf(rnp, rdp);
		return 1;
 	}

-	/* Compute and saturate jiffies_till_sched_qs. */
-	jtsq = jiffies_till_sched_qs;
-	rjtsc = rcu_jiffies_till_stall_check();
-	if (jtsq > rjtsc / 2) {
-		WRITE_ONCE(jiffies_till_sched_qs, rjtsc);
-		jtsq = rjtsc / 2;
-	} else if (jtsq < 1) {
-		WRITE_ONCE(jiffies_till_sched_qs, 1);
-		jtsq = 1;
-	}
-
 	/*
	 * Has this CPU encountered a cond_resched_rcu_qs() since the
	 * beginning of the grace period? For this to be the case,
	 * the CPU has to have noticed the current grace period.  This
	 * might not be the case for nohz_full CPUs looping in the kernel.
	 */
-	rnp = rdp->mynode;
+	jtsq = jiffies_till_sched_qs;
 	ruqp = per_cpu_ptr(&rcu_dynticks.rcu_urgent_qs, rdp->cpu);
 	if (time_after(jiffies, rdp->rsp->gp_start + jtsq) &&
	    READ_ONCE(rdp->rcu_qs_ctr_snap) != per_cpu(rcu_dynticks.rcu_qs_ctr, rdp->cpu) &&
	    READ_ONCE(rdp->gpnum) == rnp->gpnum && !rdp->gpwrap) {
		trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("rqc"));
+		rcu_gpnum_ovf(rnp, rdp);
		return 1;
-	} else {
+	} else if (time_after(jiffies, rdp->rsp->gp_start + jtsq)) {
		/* Load rcu_qs_ctr before store to rcu_urgent_qs. */
		smp_store_release(ruqp, true);
 	}
@@ -1285,6 +1340,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
 	if (!(rdp->grpmask & rcu_rnp_online_cpus(rnp))) {
		trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("ofl"));
		rdp->offline_fqs++;
+		rcu_gpnum_ovf(rnp, rdp);
		return 1;
 	}

@@ -1304,10 +1360,6 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
	 * updates are only once every few jiffies, the probability of
	 * lossage (and thus of slight grace-period extension) is
	 * quite low.
-	 *
-	 * Note that if the jiffies_till_sched_qs boot/sysfs parameter
-	 * is set too high, we override with half of the RCU CPU stall
-	 * warning delay.
	 */
 	rnhqp = &per_cpu(rcu_dynticks.rcu_need_heavy_qs, rdp->cpu);
 	if (!READ_ONCE(*rnhqp) &&
@@ -1316,15 +1368,26 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
		WRITE_ONCE(*rnhqp, true);
		/* Store rcu_need_heavy_qs before rcu_urgent_qs. */
		smp_store_release(ruqp, true);
-		rdp->rsp->jiffies_resched += 5; /* Re-enable beating. */
+		rdp->rsp->jiffies_resched += jtsq; /* Re-enable beating. */
 	}

 	/*
-	 * If more than halfway to RCU CPU stall-warning time, do
-	 * a resched_cpu() to try to loosen things up a bit.
+	 * If more than halfway to RCU CPU stall-warning time, do a
+	 * resched_cpu() to try to loosen things up a bit.  Also check to
+	 * see if the CPU is getting hammered with interrupts, but only
+	 * once per grace period, just to keep the IPIs down to a dull roar.
	 */
-	if (jiffies - rdp->rsp->gp_start > rcu_jiffies_till_stall_check() / 2)
+	if (jiffies - rdp->rsp->gp_start > rcu_jiffies_till_stall_check() / 2) {
		resched_cpu(rdp->cpu);
+		if (IS_ENABLED(CONFIG_IRQ_WORK) &&
+		    !rdp->rcu_iw_pending && rdp->rcu_iw_gpnum != rnp->gpnum &&
+		    (rnp->ffmask & rdp->grpmask)) {
+			init_irq_work(&rdp->rcu_iw, rcu_iw_handler);
+			rdp->rcu_iw_pending = true;
+			rdp->rcu_iw_gpnum = rnp->gpnum;
+			irq_work_queue_on(&rdp->rcu_iw, rdp->cpu);
+		}
+	}

 	return 0;
 }
@@ -1513,6 +1576,7 @@ static void print_cpu_stall(struct rcu_state *rsp)
 {
 	int cpu;
 	unsigned long flags;
+	struct rcu_data *rdp = this_cpu_ptr(rsp->rda);
 	struct rcu_node *rnp = rcu_get_root(rsp);
 	long totqlen = 0;

@@ -1528,7 +1592,9 @@ static void print_cpu_stall(struct rcu_state *rsp)
	 */
 	pr_err("INFO: %s self-detected stall on CPU", rsp->name);
 	print_cpu_stall_info_begin();
+	raw_spin_lock_irqsave_rcu_node(rdp->mynode, flags);
 	print_cpu_stall_info(rsp, smp_processor_id());
+	raw_spin_unlock_irqrestore_rcu_node(rdp->mynode, flags);
 	print_cpu_stall_info_end();
 	for_each_possible_cpu(cpu)
		totqlen += rcu_segcblist_n_cbs(&per_cpu_ptr(rsp->rda,
@@ -1922,6 +1988,7 @@ static bool __note_gp_changes(struct rcu_state *rsp, struct rcu_node *rnp,
		rdp->core_needs_qs = need_gp;
		zero_cpu_stall_ticks(rdp);
		WRITE_ONCE(rdp->gpwrap, false);
+		rcu_gpnum_ovf(rnp, rdp);
 	}
 	return ret;
 }
@@ -3700,6 +3767,8 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
 	rdp->cpu_no_qs.b.norm = true;
 	rdp->rcu_qs_ctr_snap = per_cpu(rcu_dynticks.rcu_qs_ctr, cpu);
 	rdp->core_needs_qs = false;
+	rdp->rcu_iw_pending = false;
+	rdp->rcu_iw_gpnum = rnp->gpnum - 1;
 	trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuonl"));
 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 }
@@ -3737,10 +3806,24 @@ static void rcutree_affinity_setting(unsigned int cpu, int outgoing)
  */
 int rcutree_online_cpu(unsigned int cpu)
 {
-	sync_sched_exp_online_cleanup(cpu);
-	rcutree_affinity_setting(cpu, -1);
+	unsigned long flags;
+	struct rcu_data *rdp;
+	struct rcu_node *rnp;
+	struct rcu_state *rsp;
+
+	for_each_rcu_flavor(rsp) {
+		rdp = per_cpu_ptr(rsp->rda, cpu);
+		rnp = rdp->mynode;
+		raw_spin_lock_irqsave_rcu_node(rnp, flags);
+		rnp->ffmask |= rdp->grpmask;
+		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
+	}
 	if (IS_ENABLED(CONFIG_TREE_SRCU))
		srcu_online_cpu(cpu);
+	if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
+		return 0; /* Too early in boot for scheduler work. */
+	sync_sched_exp_online_cleanup(cpu);
+	rcutree_affinity_setting(cpu, -1);
 	return 0;
 }

@@ -3750,6 +3833,19 @@ int rcutree_online_cpu(unsigned int cpu)
  */
 int rcutree_offline_cpu(unsigned int cpu)
 {
+	unsigned long flags;
+	struct rcu_data *rdp;
+	struct rcu_node *rnp;
+	struct rcu_state *rsp;
+
+	for_each_rcu_flavor(rsp) {
+		rdp = per_cpu_ptr(rsp->rda, cpu);
+		rnp = rdp->mynode;
+		raw_spin_lock_irqsave_rcu_node(rnp, flags);
+		rnp->ffmask &= ~rdp->grpmask;
+		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
+	}
+
 	rcutree_affinity_setting(cpu, cpu);
 	if (IS_ENABLED(CONFIG_TREE_SRCU))
		srcu_offline_cpu(cpu);
@@ -4198,8 +4294,7 @@ void __init rcu_init(void)
 	for_each_online_cpu(cpu) {
		rcutree_prepare_cpu(cpu);
		rcu_cpu_starting(cpu);
-		if (IS_ENABLED(CONFIG_TREE_SRCU))
-			srcu_online_cpu(cpu);
+		rcutree_online_cpu(cpu);
 	}
 }
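rcu_gpnum_ovf() relies on wraparound-safe unsigned comparison: counters are compared through their difference modulo the counter width, so a gap of more than half the counter space reads as "behind". A sketch of the idea (the kernel's ULONG_CMP_LT() is defined along these lines; this restatement is for illustration only):

	/* Wraparound-safe "(a) < (b)" for free-running unsigned long counters. */
	#define ULONG_CMP_LT(a, b)	(ULONG_MAX / 2 < (a) - (b))

With this, ULONG_CMP_LT(READ_ONCE(rdp->gpnum) + ULONG_MAX / 4, rnp->gpnum) fires once the CPU's snapshot of the grace-period number has fallen more than a quarter of the counter space behind the rcu_node value, which for a long-idle CPU indicates that the counter has wrapped.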
...
@@ -103,6 +103,7 @@ struct rcu_node {
				/*  Online CPUs for next expedited GP. */
				/*  Any CPU that has ever been online will */
				/*  have its bit set. */
+	unsigned long ffmask;	/* Fully functional CPUs. */
 	unsigned long grpmask;	/* Mask to apply to parent qsmask. */
				/*  Only one bit will be set in this mask. */
 	int grplo;		/* lowest-numbered CPU or group here. */
@@ -285,6 +286,10 @@ struct rcu_data {

 	/* 8) RCU CPU stall data. */
 	unsigned int softirq_snap;	/* Snapshot of softirq activity. */
+	/* ->rcu_iw* fields protected by leaf rcu_node ->lock. */
+	struct irq_work rcu_iw;		/* Check for non-irq activity. */
+	bool rcu_iw_pending;		/* Is ->rcu_iw pending? */
+	unsigned long rcu_iw_gpnum;	/* ->gpnum associated with ->rcu_iw. */

 	int cpu;
 	struct rcu_state *rsp;
...
@@ -54,6 +54,7 @@ DEFINE_PER_CPU(char, rcu_cpu_has_work);
  * This probably needs to be excluded from -rt builds.
  */
 #define rt_mutex_owner(a) ({ WARN_ON_ONCE(1); NULL; })
+#define rt_mutex_futex_unlock(x) WARN_ON_ONCE(1)

 #endif /* #else #ifdef CONFIG_RCU_BOOST */

@@ -530,7 +531,7 @@ void rcu_read_unlock_special(struct task_struct *t)

		/* Unboost if we were boosted. */
		if (IS_ENABLED(CONFIG_RCU_BOOST) && drop_boost_mutex)
-			rt_mutex_unlock(&rnp->boost_mtx);
+			rt_mutex_futex_unlock(&rnp->boost_mtx);

		/*
		 * If this was the last task on the expedited lists,
@@ -911,8 +912,6 @@ void exit_rcu(void)

 #ifdef CONFIG_RCU_BOOST

-#include "../locking/rtmutex_common.h"
-
 static void rcu_wake_cond(struct task_struct *t, int status)
 {
 	/*
@@ -1507,7 +1506,7 @@ static void rcu_prepare_for_idle(void)
 	rdtp->last_accelerate = jiffies;
 	for_each_rcu_flavor(rsp) {
		rdp = this_cpu_ptr(rsp->rda);
-		if (rcu_segcblist_pend_cbs(&rdp->cblist))
+		if (!rcu_segcblist_pend_cbs(&rdp->cblist))
			continue;
		rnp = rdp->mynode;
		raw_spin_lock_rcu_node(rnp);	/* irqs already disabled. */
@@ -1671,6 +1670,7 @@ static void print_cpu_stall_info_begin(void)
  */
 static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
 {
+	unsigned long delta;
 	char fast_no_hz[72];
 	struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
 	struct rcu_dynticks *rdtp = rdp->dynticks;
@@ -1685,11 +1685,15 @@ static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
		ticks_value = rsp->gpnum - rdp->gpnum;
 	}
 	print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
-	pr_err("\t%d-%c%c%c: (%lu %s) idle=%03x/%llx/%d softirq=%u/%u fqs=%ld %s\n",
+	delta = rdp->mynode->gpnum - rdp->rcu_iw_gpnum;
+	pr_err("\t%d-%c%c%c%c: (%lu %s) idle=%03x/%llx/%d softirq=%u/%u fqs=%ld %s\n",
	       cpu,
	       "O."[!!cpu_online(cpu)],
	       "o."[!!(rdp->grpmask & rdp->mynode->qsmaskinit)],
	       "N."[!!(rdp->grpmask & rdp->mynode->qsmaskinitnext)],
+	       !IS_ENABLED(CONFIG_IRQ_WORK) ? '?' :
+			rdp->rcu_iw_pending ? (int)min(delta, 9UL) + '0' :
+			"!."[!delta],
	       ticks_value, ticks_title,
	       rcu_dynticks_snap(rdtp) & 0xfff,
	       rdtp->dynticks_nesting, rdtp->dynticks_nmi_nesting,
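For reference, the new fourth character in the per-CPU stall printout decodes as: '?' when the kernel lacks CONFIG_IRQ_WORK, a digit (saturating at 9) counting the grace periods that the posted irq_work has been pending, '.' when the most recent irq_work executed during the current grace period, and '!' when it last executed during some earlier grace period.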
...
@@ -494,6 +494,7 @@ EXPORT_SYMBOL_GPL(do_trace_rcu_torture_read);
 #endif

 int rcu_cpu_stall_suppress __read_mostly; /* 1 = suppress stall warnings. */
+EXPORT_SYMBOL_GPL(rcu_cpu_stall_suppress);
 static int rcu_cpu_stall_timeout __read_mostly = CONFIG_RCU_CPU_STALL_TIMEOUT;
 module_param(rcu_cpu_stall_suppress, int, 0644);
@@ -575,7 +576,6 @@ DEFINE_STATIC_SRCU(tasks_rcu_exit_srcu);
 static int rcu_task_stall_timeout __read_mostly = RCU_TASK_STALL_TIMEOUT;
 module_param(rcu_task_stall_timeout, int, 0644);

-static void rcu_spawn_tasks_kthread(void);
 static struct task_struct *rcu_tasks_kthread_ptr;

 /**
@@ -600,7 +600,6 @@ void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func)
 {
 	unsigned long flags;
 	bool needwake;
-	bool havetask = READ_ONCE(rcu_tasks_kthread_ptr);

 	rhp->next = NULL;
 	rhp->func = func;
@@ -610,11 +609,8 @@ void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func)
 	rcu_tasks_cbs_tail = &rhp->next;
 	raw_spin_unlock_irqrestore(&rcu_tasks_cbs_lock, flags);
 	/* We can't create the thread unless interrupts are enabled. */
-	if ((needwake && havetask) ||
-	    (!havetask && !irqs_disabled_flags(flags))) {
-		rcu_spawn_tasks_kthread();
+	if (needwake && READ_ONCE(rcu_tasks_kthread_ptr))
		wake_up(&rcu_tasks_cbs_wq);
-	}
 }
 EXPORT_SYMBOL_GPL(call_rcu_tasks);
@@ -853,27 +849,18 @@ static int __noreturn rcu_tasks_kthread(void *arg)
 	}
 }

-/* Spawn rcu_tasks_kthread() at first call to call_rcu_tasks(). */
-static void rcu_spawn_tasks_kthread(void)
+/* Spawn rcu_tasks_kthread() at core_initcall() time. */
+static int __init rcu_spawn_tasks_kthread(void)
 {
-	static DEFINE_MUTEX(rcu_tasks_kthread_mutex);
 	struct task_struct *t;

-	if (READ_ONCE(rcu_tasks_kthread_ptr)) {
-		smp_mb(); /* Ensure caller sees full kthread. */
-		return;
-	}
-	mutex_lock(&rcu_tasks_kthread_mutex);
-	if (rcu_tasks_kthread_ptr) {
-		mutex_unlock(&rcu_tasks_kthread_mutex);
-		return;
-	}
 	t = kthread_run(rcu_tasks_kthread, NULL, "rcu_tasks_kthread");
 	BUG_ON(IS_ERR(t));
 	smp_mb(); /* Ensure others see full kthread. */
 	WRITE_ONCE(rcu_tasks_kthread_ptr, t);
-	mutex_unlock(&rcu_tasks_kthread_mutex);
+	return 0;
 }
+core_initcall(rcu_spawn_tasks_kthread);

 /* Do the srcu_read_lock() for the above synchronize_srcu(). */
 void exit_tasks_rcu_start(void)
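The open-coded, mutex-guarded lazy spawn is replaced by an initcall. Because core_initcall() functions run exactly once, single-threaded, during boot, no mutex or double-check is needed. A minimal standalone sketch of the pattern (all names hypothetical):

	#include <linux/err.h>
	#include <linux/init.h>
	#include <linux/kthread.h>
	#include <linux/sched.h>

	static struct task_struct *example_kthread_ptr;

	static int example_kthread_fn(void *arg)
	{
		while (!kthread_should_stop())
			schedule_timeout_interruptible(HZ);	/* Placeholder work loop. */
		return 0;
	}

	static int __init example_spawn_kthread(void)
	{
		struct task_struct *t;

		t = kthread_run(example_kthread_fn, NULL, "example_kthread");
		if (IS_ERR(t))
			return PTR_ERR(t);
		smp_mb();	/* Ensure others see the fully initialized kthread. */
		WRITE_ONCE(example_kthread_ptr, t);
		return 0;
	}
	core_initcall(example_spawn_kthread);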
...
@@ -505,8 +505,7 @@ void resched_cpu(int cpu)
 	struct rq *rq = cpu_rq(cpu);
 	unsigned long flags;

-	if (!raw_spin_trylock_irqsave(&rq->lock, flags))
-		return;
+	raw_spin_lock_irqsave(&rq->lock, flags);
 	resched_curr(rq);
 	raw_spin_unlock_irqrestore(&rq->lock, flags);
 }
@@ -4842,6 +4841,7 @@ int __sched _cond_resched(void)
		preempt_schedule_common();
		return 1;
 	}
+	rcu_all_qs();
 	return 0;
 }
 EXPORT_SYMBOL(_cond_resched);
@@ -5165,6 +5165,7 @@ void sched_show_task(struct task_struct *p)
 	show_stack(p, NULL);
 	put_task_stack(p);
 }
+EXPORT_SYMBOL_GPL(sched_show_task);

 static inline bool
 state_filter_match(unsigned long state_filter, struct task_struct *p)
...
@@ -42,7 +42,7 @@ else
	exit 1
 fi

-T=/tmp/config_override.sh.$$
+T=${TMPDIR-/tmp}/config_override.sh.$$
 trap 'rm -rf $T' 0
 mkdir $T
...
@@ -19,7 +19,7 @@
 #
 # Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>

-T=/tmp/abat-chk-config.sh.$$
+T=${TMPDIR-/tmp}/abat-chk-config.sh.$$
 trap 'rm -rf $T' 0
 mkdir $T
...
@@ -32,7 +32,7 @@
 #
 # Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>

-T=/tmp/configinit.sh.$$
+T=${TMPDIR-/tmp}/configinit.sh.$$
 trap 'rm -rf $T' 0
 mkdir $T
...
@@ -35,7 +35,7 @@ then
	exit 1
 fi

-T=/tmp/test-linux.sh.$$
+T=${TMPDIR-/tmp}/test-linux.sh.$$
 trap 'rm -rf $T' 0
 mkdir $T
...
@@ -38,7 +38,7 @@
 #
 # Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>

-T=/tmp/kvm-test-1-run.sh.$$
+T=${TMPDIR-/tmp}/kvm-test-1-run.sh.$$
 trap 'rm -rf $T' 0
 mkdir $T
...
@@ -30,7 +30,7 @@
 scriptname=$0
 args="$*"

-T=/tmp/kvm.sh.$$
+T=${TMPDIR-/tmp}/kvm.sh.$$
 trap 'rm -rf $T' 0
 mkdir $T
@@ -222,7 +222,7 @@ do
		exit 1
	fi
 done
-sort -k2nr $T/cfgcpu > $T/cfgcpu.sort
+sort -k2nr $T/cfgcpu -T="$T" > $T/cfgcpu.sort

 # Use a greedy bin-packing algorithm, sorting the list accordingly.
 awk < $T/cfgcpu.sort > $T/cfgcpu.pack -v ncpus=$cpus '
...
@@ -28,7 +28,7 @@
 F=$1
 title=$2

-T=/tmp/parse-build.sh.$$
+T=${TMPDIR-/tmp}/parse-build.sh.$$
 trap 'rm -rf $T' 0
 mkdir $T
...
@@ -27,7 +27,7 @@
 #
 # Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>

-T=/tmp/parse-torture.sh.$$
+T=${TMPDIR-/tmp}/parse-torture.sh.$$
 file="$1"
 title="$2"
...
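A note on the shell idiom used throughout these scripts: ${TMPDIR-/tmp} expands to $TMPDIR when that variable is set and falls back to /tmp only when it is unset, so all scratch files can be redirected with a single export TMPDIR=/some/dir. The -T flag added to the sort invocation in kvm.sh points sort's own temporary files at the same directory.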