Commit 86c6a2fd authored by Linus Torvalds

Merge branch 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull scheduler updates from Ingo Molnar:
 "The main changes in this cycle are:

   - 'Nested Sleep Debugging', activated when CONFIG_DEBUG_ATOMIC_SLEEP=y.

     This instruments might_sleep() checks to catch places that nest
     blocking primitives - such as taking a mutex inside a wait loop
     (see the sketch after the quoted log). Such bugs can result in
     hard-to-debug races and hangs.

     Another category of invalid nesting that this facility will detect
     is the calling of blocking functions from within schedule() ->
     sched_submit_work() -> blk_schedule_flush_plug().

     There's some potential for false positives (if secondary blocking
     primitives themselves are not yet ready for this facility), but
     the kernel warns about such bugs only once per bootup, so the
     warning isn't much of a nuisance.

     This feature comes with a number of fixes for problems it
     uncovered, so no warnings are expected under normal operation.

   - Another round of sched/numa optimizations and refinements, for
     CONFIG_NUMA_BALANCING=y.

   - Another round of sched/dl fixes and refinements.

  Plus various smaller fixes and cleanups"
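
For context, a minimal sketch of the kind of nesting the new debugging code
flags, and of the wait_woken()-based shape that the conversions in this merge
(n_tty, inotify, rfcomm, module loading) move to. The my_wq, my_lock and
my_cond symbols are illustrative placeholders, not code from this series:

#include <linux/wait.h>
#include <linux/mutex.h>
#include <linux/sched.h>

static DECLARE_WAIT_QUEUE_HEAD(my_wq);
static DEFINE_MUTEX(my_lock);
static bool my_cond;	/* set under my_lock by the waker, followed by wake_up(&my_wq) */

/* Anti-pattern: mutex_lock() may sleep, but prepare_to_wait() has already
 * put the task into TASK_INTERRUPTIBLE, so might_sleep() now warns. */
static void wait_for_cond_nested(void)
{
	DEFINE_WAIT(wait);
	bool done;

	for (;;) {
		prepare_to_wait(&my_wq, &wait, TASK_INTERRUPTIBLE);
		mutex_lock(&my_lock);	/* blocking primitive nested in the wait loop */
		done = my_cond;
		mutex_unlock(&my_lock);
		if (done)
			break;
		schedule();
	}
	finish_wait(&my_wq, &wait);
}

/* Converted shape: the condition is checked while still TASK_RUNNING and
 * the sleeping state is only set inside wait_woken(). */
static void wait_for_cond_woken(void)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	bool done;

	add_wait_queue(&my_wq, &wait);
	for (;;) {
		mutex_lock(&my_lock);
		done = my_cond;
		mutex_unlock(&my_lock);
		if (done)
			break;
		wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
	}
	remove_wait_queue(&my_wq, &wait);
}

Because woken_wake_function() marks the waiter with WQ_FLAG_WOKEN before waking
it, a wakeup that races with the condition check is not lost even though the
task state is only changed inside wait_woken().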

* 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (54 commits)
  sched: Add missing rcu protection to wake_up_all_idle_cpus
  sched/deadline: Introduce start_hrtick_dl() for !CONFIG_SCHED_HRTICK
  sched/numa: Init numa balancing fields of init_task
  sched/deadline: Remove unnecessary definitions in cpudeadline.h
  sched/cpupri: Remove unnecessary definitions in cpupri.h
  sched/deadline: Fix rq->dl.pushable_tasks bug in push_dl_task()
  sched/fair: Fix stale overloaded status in the busiest group finding logic
  sched: Move p->nr_cpus_allowed check to select_task_rq()
  sched/completion: Document when to use wait_for_completion_io_*()
  sched: Update comments about CLONE_NEWUTS and CLONE_NEWIPC
  sched/fair: Kill task_struct::numa_entry and numa_group::task_list
  sched: Refactor task_struct to use numa_faults instead of numa_* pointers
  sched/deadline: Don't check CONFIG_SMP in switched_from_dl()
  sched/deadline: Reschedule from switched_from_dl() after a successful pull
  sched/deadline: Push task away if the deadline is equal to curr during wakeup
  sched/deadline: Add deadline rq status print
  sched/deadline: Fix artificial overrun introduced by yield_task_dl()
  sched/rt: Clean up check_preempt_equal_prio()
  sched/core: Use dl_bw_of() under rcu_read_lock_sched()
  sched: Check if we got a shallowest_idle_cpu before searching for least_loaded_cpu
  ...
parents bee2782f fd7de1e8
......@@ -30,9 +30,6 @@ static __always_inline void preempt_count_set(int pc)
/*
* must be macros to avoid header recursion hell
*/
#define task_preempt_count(p) \
(task_thread_info(p)->saved_preempt_count & ~PREEMPT_NEED_RESCHED)
#define init_task_preempt_count(p) do { \
task_thread_info(p)->saved_preempt_count = PREEMPT_DISABLED; \
} while (0)
......
......@@ -2123,7 +2123,7 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file,
{
struct n_tty_data *ldata = tty->disc_data;
unsigned char __user *b = buf;
DECLARE_WAITQUEUE(wait, current);
DEFINE_WAIT_FUNC(wait, woken_wake_function);
int c;
int minimum, time;
ssize_t retval = 0;
......@@ -2186,10 +2186,6 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file,
nr--;
break;
}
/* This statement must be first before checking for input
so that any interrupt will set the state back to
TASK_RUNNING. */
set_current_state(TASK_INTERRUPTIBLE);
if (((minimum - (b - buf)) < ldata->minimum_to_wake) &&
((minimum - (b - buf)) >= 1))
......@@ -2220,13 +2216,13 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file,
n_tty_set_room(tty);
up_read(&tty->termios_rwsem);
timeout = schedule_timeout(timeout);
timeout = wait_woken(&wait, TASK_INTERRUPTIBLE,
timeout);
down_read(&tty->termios_rwsem);
continue;
}
}
__set_current_state(TASK_RUNNING);
/* Deal with packet mode. */
if (packet && b == buf) {
......@@ -2273,7 +2269,6 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file,
mutex_unlock(&ldata->atomic_read_lock);
__set_current_state(TASK_RUNNING);
if (b - buf)
retval = b - buf;
......@@ -2306,7 +2301,7 @@ static ssize_t n_tty_write(struct tty_struct *tty, struct file *file,
const unsigned char *buf, size_t nr)
{
const unsigned char *b = buf;
DECLARE_WAITQUEUE(wait, current);
DEFINE_WAIT_FUNC(wait, woken_wake_function);
int c;
ssize_t retval = 0;
......@@ -2324,7 +2319,6 @@ static ssize_t n_tty_write(struct tty_struct *tty, struct file *file,
add_wait_queue(&tty->write_wait, &wait);
while (1) {
set_current_state(TASK_INTERRUPTIBLE);
if (signal_pending(current)) {
retval = -ERESTARTSYS;
break;
......@@ -2378,12 +2372,11 @@ static ssize_t n_tty_write(struct tty_struct *tty, struct file *file,
}
up_read(&tty->termios_rwsem);
schedule();
wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
down_read(&tty->termios_rwsem);
}
break_out:
__set_current_state(TASK_RUNNING);
remove_wait_queue(&tty->write_wait, &wait);
if (b - buf != nr && tty->fasync)
set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
......
......@@ -227,14 +227,13 @@ static ssize_t inotify_read(struct file *file, char __user *buf,
struct fsnotify_event *kevent;
char __user *start;
int ret;
DEFINE_WAIT(wait);
DEFINE_WAIT_FUNC(wait, woken_wake_function);
start = buf;
group = file->private_data;
add_wait_queue(&group->notification_waitq, &wait);
while (1) {
prepare_to_wait(&group->notification_waitq, &wait, TASK_INTERRUPTIBLE);
mutex_lock(&group->notification_mutex);
kevent = get_one_event(group, count);
mutex_unlock(&group->notification_mutex);
......@@ -264,10 +263,10 @@ static ssize_t inotify_read(struct file *file, char __user *buf,
if (start != buf)
break;
schedule();
wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
}
remove_wait_queue(&group->notification_waitq, &wait);
finish_wait(&group->notification_waitq, &wait);
if (start != buf && ret != -EFAULT)
ret = buf - start;
return ret;
......
......@@ -23,9 +23,6 @@ static __always_inline void preempt_count_set(int pc)
/*
* must be macros to avoid header recursion hell
*/
#define task_preempt_count(p) \
(task_thread_info(p)->preempt_count & ~PREEMPT_NEED_RESCHED)
#define init_task_preempt_count(p) do { \
task_thread_info(p)->preempt_count = PREEMPT_DISABLED; \
} while (0)
......
......@@ -246,15 +246,6 @@ static inline int freezable_schedule_hrtimeout_range(ktime_t *expires,
* defined in <linux/wait.h>
*/
#define wait_event_freezekillable(wq, condition) \
({ \
int __retval; \
freezer_do_not_count(); \
__retval = wait_event_killable(wq, (condition)); \
freezer_count(); \
__retval; \
})
/* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */
#define wait_event_freezekillable_unsafe(wq, condition) \
({ \
......@@ -265,35 +256,6 @@ static inline int freezable_schedule_hrtimeout_range(ktime_t *expires,
__retval; \
})
#define wait_event_freezable(wq, condition) \
({ \
int __retval; \
freezer_do_not_count(); \
__retval = wait_event_interruptible(wq, (condition)); \
freezer_count(); \
__retval; \
})
#define wait_event_freezable_timeout(wq, condition, timeout) \
({ \
long __retval = timeout; \
freezer_do_not_count(); \
__retval = wait_event_interruptible_timeout(wq, (condition), \
__retval); \
freezer_count(); \
__retval; \
})
#define wait_event_freezable_exclusive(wq, condition) \
({ \
int __retval; \
freezer_do_not_count(); \
__retval = wait_event_interruptible_exclusive(wq, condition); \
freezer_count(); \
__retval; \
})
#else /* !CONFIG_FREEZER */
static inline bool frozen(struct task_struct *p) { return false; }
static inline bool freezing(struct task_struct *p) { return false; }
......@@ -331,18 +293,6 @@ static inline void set_freezable(void) {}
#define freezable_schedule_hrtimeout_range(expires, delta, mode) \
schedule_hrtimeout_range(expires, delta, mode)
#define wait_event_freezable(wq, condition) \
wait_event_interruptible(wq, condition)
#define wait_event_freezable_timeout(wq, condition, timeout) \
wait_event_interruptible_timeout(wq, condition, timeout)
#define wait_event_freezable_exclusive(wq, condition) \
wait_event_interruptible_exclusive(wq, condition)
#define wait_event_freezekillable(wq, condition) \
wait_event_killable(wq, condition)
#define wait_event_freezekillable_unsafe(wq, condition) \
wait_event_killable(wq, condition)
......
......@@ -166,6 +166,15 @@ extern struct task_group root_task_group;
# define INIT_RT_MUTEXES(tsk)
#endif
#ifdef CONFIG_NUMA_BALANCING
# define INIT_NUMA_BALANCING(tsk) \
.numa_preferred_nid = -1, \
.numa_group = NULL, \
.numa_faults = NULL,
#else
# define INIT_NUMA_BALANCING(tsk)
#endif
/*
* INIT_TASK is used to set up the first task table, touch at
* your own risk!. Base=0, limit=0x1fffff (=2MB)
......@@ -237,6 +246,7 @@ extern struct task_group root_task_group;
INIT_CPUSET_SEQ(tsk) \
INIT_RT_MUTEXES(tsk) \
INIT_VTIME(tsk) \
INIT_NUMA_BALANCING(tsk) \
}
......
......@@ -162,6 +162,7 @@ extern int _cond_resched(void);
#endif
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
void ___might_sleep(const char *file, int line, int preempt_offset);
void __might_sleep(const char *file, int line, int preempt_offset);
/**
* might_sleep - annotation for functions that can sleep
......@@ -175,10 +176,14 @@ extern int _cond_resched(void);
*/
# define might_sleep() \
do { __might_sleep(__FILE__, __LINE__, 0); might_resched(); } while (0)
# define sched_annotate_sleep() __set_current_state(TASK_RUNNING)
#else
static inline void ___might_sleep(const char *file, int line,
int preempt_offset) { }
static inline void __might_sleep(const char *file, int line,
int preempt_offset) { }
# define might_sleep() do { might_resched(); } while (0)
# define sched_annotate_sleep() do { } while (0)
#endif
#define might_sleep_if(cond) do { if (cond) might_sleep(); } while (0)
......
......@@ -243,6 +243,43 @@ extern char ___assert_task_state[1 - 2*!!(
((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
(task->flags & PF_FROZEN) == 0)
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
#define __set_task_state(tsk, state_value) \
do { \
(tsk)->task_state_change = _THIS_IP_; \
(tsk)->state = (state_value); \
} while (0)
#define set_task_state(tsk, state_value) \
do { \
(tsk)->task_state_change = _THIS_IP_; \
set_mb((tsk)->state, (state_value)); \
} while (0)
/*
* set_current_state() includes a barrier so that the write of current->state
* is correctly serialised wrt the caller's subsequent test of whether to
* actually sleep:
*
* set_current_state(TASK_UNINTERRUPTIBLE);
* if (do_i_need_to_sleep())
* schedule();
*
* If the caller does not need such serialisation then use __set_current_state()
*/
#define __set_current_state(state_value) \
do { \
current->task_state_change = _THIS_IP_; \
current->state = (state_value); \
} while (0)
#define set_current_state(state_value) \
do { \
current->task_state_change = _THIS_IP_; \
set_mb(current->state, (state_value)); \
} while (0)
#else
#define __set_task_state(tsk, state_value) \
do { (tsk)->state = (state_value); } while (0)
#define set_task_state(tsk, state_value) \
......@@ -259,11 +296,13 @@ extern char ___assert_task_state[1 - 2*!!(
*
* If the caller does not need such serialisation then use __set_current_state()
*/
#define __set_current_state(state_value) \
#define __set_current_state(state_value) \
do { current->state = (state_value); } while (0)
#define set_current_state(state_value) \
#define set_current_state(state_value) \
set_mb(current->state, (state_value))
#endif
/* Task command name length */
#define TASK_COMM_LEN 16
......@@ -1558,27 +1597,22 @@ struct task_struct {
struct numa_group *numa_group;
/*
* Exponential decaying average of faults on a per-node basis.
* Scheduling placement decisions are made based on these counts.
* The values remain static for the duration of a PTE scan
* numa_faults is an array split into four regions:
* faults_memory, faults_cpu, faults_memory_buffer, faults_cpu_buffer
* in this precise order.
*
* faults_memory: Exponential decaying average of faults on a per-node
* basis. Scheduling placement decisions are made based on these
* counts. The values remain static for the duration of a PTE scan.
* faults_cpu: Track the nodes the process was running on when a NUMA
* hinting fault was incurred.
* faults_memory_buffer and faults_cpu_buffer: Record faults per node
* during the current scan window. When the scan completes, the counts
* in faults_memory and faults_cpu decay and these values are copied.
*/
unsigned long *numa_faults_memory;
unsigned long *numa_faults;
unsigned long total_numa_faults;
/*
* numa_faults_buffer records faults per node during the current
* scan window. When the scan completes, the counts in
* numa_faults_memory decay and these values are copied.
*/
unsigned long *numa_faults_buffer_memory;
/*
* Track the nodes the process was running on when a NUMA hinting
* fault was incurred.
*/
unsigned long *numa_faults_cpu;
unsigned long *numa_faults_buffer_cpu;
/*
* numa_faults_locality tracks if faults recorded during the last
* scan window were remote/local. The task scan period is adapted
......@@ -1661,6 +1695,9 @@ struct task_struct {
unsigned int sequential_io;
unsigned int sequential_io_avg;
#endif
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
unsigned long task_state_change;
#endif
};
/* Future-safe accessor for struct task_struct's cpus_allowed. */
......@@ -2052,6 +2089,10 @@ static inline void tsk_restore_flags(struct task_struct *task,
task->flags |= orig_flags & flags;
}
extern int cpuset_cpumask_can_shrink(const struct cpumask *cur,
const struct cpumask *trial);
extern int task_can_attach(struct task_struct *p,
const struct cpumask *cs_cpus_allowed);
#ifdef CONFIG_SMP
extern void do_set_cpus_allowed(struct task_struct *p,
const struct cpumask *new_mask);
......@@ -2760,7 +2801,7 @@ static inline int signal_pending_state(long state, struct task_struct *p)
extern int _cond_resched(void);
#define cond_resched() ({ \
__might_sleep(__FILE__, __LINE__, 0); \
___might_sleep(__FILE__, __LINE__, 0); \
_cond_resched(); \
})
......@@ -2773,14 +2814,14 @@ extern int __cond_resched_lock(spinlock_t *lock);
#endif
#define cond_resched_lock(lock) ({ \
__might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET); \
___might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);\
__cond_resched_lock(lock); \
})
extern int __cond_resched_softirq(void);
#define cond_resched_softirq() ({ \
__might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET); \
___might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET); \
__cond_resched_softirq(); \
})
......
......@@ -13,9 +13,12 @@ typedef struct __wait_queue wait_queue_t;
typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int flags, void *key);
int default_wake_function(wait_queue_t *wait, unsigned mode, int flags, void *key);
/* __wait_queue::flags */
#define WQ_FLAG_EXCLUSIVE 0x01
#define WQ_FLAG_WOKEN 0x02
struct __wait_queue {
unsigned int flags;
#define WQ_FLAG_EXCLUSIVE 0x01
void *private;
wait_queue_func_t func;
struct list_head task_list;
......@@ -258,11 +261,37 @@ __out: __ret; \
*/
#define wait_event(wq, condition) \
do { \
might_sleep(); \
if (condition) \
break; \
__wait_event(wq, condition); \
} while (0)
#define __wait_event_freezable(wq, condition) \
___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0, \
schedule(); try_to_freeze())
/**
* wait_event_freezable - sleep (or freeze) until a condition gets true
* @wq: the waitqueue to wait on
* @condition: a C expression for the event to wait for
*
* The process is put to sleep (TASK_INTERRUPTIBLE -- so as not to contribute
* to system load) until the @condition evaluates to true. The
* @condition is checked each time the waitqueue @wq is woken up.
*
* wake_up() has to be called after changing any variable that could
* change the result of the wait condition.
*/
#define wait_event_freezable(wq, condition) \
({ \
int __ret = 0; \
might_sleep(); \
if (!(condition)) \
__ret = __wait_event_freezable(wq, condition); \
__ret; \
})
#define __wait_event_timeout(wq, condition, timeout) \
___wait_event(wq, ___wait_cond_timeout(condition), \
TASK_UNINTERRUPTIBLE, 0, timeout, \
......@@ -290,11 +319,30 @@ do { \
#define wait_event_timeout(wq, condition, timeout) \
({ \
long __ret = timeout; \
might_sleep(); \
if (!___wait_cond_timeout(condition)) \
__ret = __wait_event_timeout(wq, condition, timeout); \
__ret; \
})
#define __wait_event_freezable_timeout(wq, condition, timeout) \
___wait_event(wq, ___wait_cond_timeout(condition), \
TASK_INTERRUPTIBLE, 0, timeout, \
__ret = schedule_timeout(__ret); try_to_freeze())
/*
* like wait_event_timeout() -- except it uses TASK_INTERRUPTIBLE to avoid
* increasing load and is freezable.
*/
#define wait_event_freezable_timeout(wq, condition, timeout) \
({ \
long __ret = timeout; \
might_sleep(); \
if (!___wait_cond_timeout(condition)) \
__ret = __wait_event_freezable_timeout(wq, condition, timeout); \
__ret; \
})
#define __wait_event_cmd(wq, condition, cmd1, cmd2) \
(void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
cmd1; schedule(); cmd2)
......@@ -315,6 +363,7 @@ do { \
*/
#define wait_event_cmd(wq, condition, cmd1, cmd2) \
do { \
might_sleep(); \
if (condition) \
break; \
__wait_event_cmd(wq, condition, cmd1, cmd2); \
......@@ -342,6 +391,7 @@ do { \
#define wait_event_interruptible(wq, condition) \
({ \
int __ret = 0; \
might_sleep(); \
if (!(condition)) \
__ret = __wait_event_interruptible(wq, condition); \
__ret; \
......@@ -375,6 +425,7 @@ do { \
#define wait_event_interruptible_timeout(wq, condition, timeout) \
({ \
long __ret = timeout; \
might_sleep(); \
if (!___wait_cond_timeout(condition)) \
__ret = __wait_event_interruptible_timeout(wq, \
condition, timeout); \
......@@ -425,6 +476,7 @@ do { \
#define wait_event_hrtimeout(wq, condition, timeout) \
({ \
int __ret = 0; \
might_sleep(); \
if (!(condition)) \
__ret = __wait_event_hrtimeout(wq, condition, timeout, \
TASK_UNINTERRUPTIBLE); \
......@@ -450,6 +502,7 @@ do { \
#define wait_event_interruptible_hrtimeout(wq, condition, timeout) \
({ \
long __ret = 0; \
might_sleep(); \
if (!(condition)) \
__ret = __wait_event_hrtimeout(wq, condition, timeout, \
TASK_INTERRUPTIBLE); \
......@@ -463,12 +516,27 @@ do { \
#define wait_event_interruptible_exclusive(wq, condition) \
({ \
int __ret = 0; \
might_sleep(); \
if (!(condition)) \
__ret = __wait_event_interruptible_exclusive(wq, condition);\
__ret; \
})
#define __wait_event_freezable_exclusive(wq, condition) \
___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0, \
schedule(); try_to_freeze())
#define wait_event_freezable_exclusive(wq, condition) \
({ \
int __ret = 0; \
might_sleep(); \
if (!(condition)) \
__ret = __wait_event_freezable_exclusive(wq, condition);\
__ret; \
})
#define __wait_event_interruptible_locked(wq, condition, exclusive, irq) \
({ \
int __ret = 0; \
......@@ -637,6 +705,7 @@ do { \
#define wait_event_killable(wq, condition) \
({ \
int __ret = 0; \
might_sleep(); \
if (!(condition)) \
__ret = __wait_event_killable(wq, condition); \
__ret; \
......@@ -830,6 +899,8 @@ void prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int sta
long prepare_to_wait_event(wait_queue_head_t *q, wait_queue_t *wait, int state);
void finish_wait(wait_queue_head_t *q, wait_queue_t *wait);
void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait, unsigned int mode, void *key);
long wait_woken(wait_queue_t *wait, unsigned mode, long timeout);
int woken_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
......@@ -886,6 +957,7 @@ extern int bit_wait_io_timeout(struct wait_bit_key *);
static inline int
wait_on_bit(void *word, int bit, unsigned mode)
{
might_sleep();
if (!test_bit(bit, word))
return 0;
return out_of_line_wait_on_bit(word, bit,
......@@ -910,6 +982,7 @@ wait_on_bit(void *word, int bit, unsigned mode)
static inline int
wait_on_bit_io(void *word, int bit, unsigned mode)
{
might_sleep();
if (!test_bit(bit, word))
return 0;
return out_of_line_wait_on_bit(word, bit,
......@@ -936,6 +1009,7 @@ wait_on_bit_io(void *word, int bit, unsigned mode)
static inline int
wait_on_bit_action(void *word, int bit, wait_bit_action_f *action, unsigned mode)
{
might_sleep();
if (!test_bit(bit, word))
return 0;
return out_of_line_wait_on_bit(word, bit, action, mode);
......@@ -963,6 +1037,7 @@ wait_on_bit_action(void *word, int bit, wait_bit_action_f *action, unsigned mode
static inline int
wait_on_bit_lock(void *word, int bit, unsigned mode)
{
might_sleep();
if (!test_and_set_bit(bit, word))
return 0;
return out_of_line_wait_on_bit_lock(word, bit, bit_wait, mode);
......@@ -986,6 +1061,7 @@ wait_on_bit_lock(void *word, int bit, unsigned mode)
static inline int
wait_on_bit_lock_io(void *word, int bit, unsigned mode)
{
might_sleep();
if (!test_and_set_bit(bit, word))
return 0;
return out_of_line_wait_on_bit_lock(word, bit, bit_wait_io, mode);
......@@ -1011,6 +1087,7 @@ wait_on_bit_lock_io(void *word, int bit, unsigned mode)
static inline int
wait_on_bit_lock_action(void *word, int bit, wait_bit_action_f *action, unsigned mode)
{
might_sleep();
if (!test_and_set_bit(bit, word))
return 0;
return out_of_line_wait_on_bit_lock(word, bit, action, mode);
......@@ -1029,6 +1106,7 @@ wait_on_bit_lock_action(void *word, int bit, wait_bit_action_f *action, unsigned
static inline
int wait_on_atomic_t(atomic_t *val, int (*action)(atomic_t *), unsigned mode)
{
might_sleep();
if (atomic_read(val) == 0)
return 0;
return out_of_line_wait_on_atomic_t(val, action, mode);
......
......@@ -897,6 +897,7 @@ static inline void sock_rps_reset_rxhash(struct sock *sk)
if (!__rc) { \
*(__timeo) = schedule_timeout(*(__timeo)); \
} \
sched_annotate_sleep(); \
lock_sock(__sk); \
__rc = __condition; \
__rc; \
......
......@@ -97,16 +97,19 @@ static inline long __trace_sched_switch_state(struct task_struct *p)
long state = p->state;
#ifdef CONFIG_PREEMPT
#ifdef CONFIG_SCHED_DEBUG
BUG_ON(p != current);
#endif /* CONFIG_SCHED_DEBUG */
/*
* For all intents and purposes a preempted task is a running task.
*/
if (task_preempt_count(p) & PREEMPT_ACTIVE)
if (preempt_count() & PREEMPT_ACTIVE)
state = TASK_RUNNING | TASK_STATE_MAX;
#endif
#endif /* CONFIG_PREEMPT */
return state;
}
#endif
#endif /* CREATE_TRACE_POINTS */
/*
* Tracepoint for task switches, performed by the scheduler:
......
......@@ -23,8 +23,8 @@
#define CLONE_CHILD_SETTID 0x01000000 /* set the TID in the child */
/* 0x02000000 was previously the unused CLONE_STOPPED (Start in stopped state)
and is now available for re-use. */
#define CLONE_NEWUTS 0x04000000 /* New utsname group? */
#define CLONE_NEWIPC 0x08000000 /* New ipcs */
#define CLONE_NEWUTS 0x04000000 /* New utsname namespace */
#define CLONE_NEWIPC 0x08000000 /* New ipc namespace */
#define CLONE_NEWUSER 0x10000000 /* New user namespace */
#define CLONE_NEWPID 0x20000000 /* New pid namespace */
#define CLONE_NEWNET 0x40000000 /* New network namespace */
......
......@@ -499,7 +499,6 @@ static int kauditd_thread(void *dummy)
set_freezable();
while (!kthread_should_stop()) {
struct sk_buff *skb;
DECLARE_WAITQUEUE(wait, current);
flush_hold_queue();
......@@ -514,16 +513,8 @@ static int kauditd_thread(void *dummy)
audit_printk_skb(skb);
continue;
}
set_current_state(TASK_INTERRUPTIBLE);
add_wait_queue(&kauditd_wait, &wait);
if (!skb_queue_len(&audit_skb_queue)) {
try_to_freeze();
schedule();
}
__set_current_state(TASK_RUNNING);
remove_wait_queue(&kauditd_wait, &wait);
wait_event_freezable(kauditd_wait, skb_queue_len(&audit_skb_queue));
}
return 0;
}
......
......@@ -506,6 +506,16 @@ static int validate_change(struct cpuset *cur, struct cpuset *trial)
goto out;
}
/*
* We can't shrink if we won't have enough room for SCHED_DEADLINE
* tasks.
*/
ret = -EBUSY;
if (is_cpu_exclusive(cur) &&
!cpuset_cpumask_can_shrink(cur->cpus_allowed,
trial->cpus_allowed))
goto out;
ret = 0;
out:
rcu_read_unlock();
......@@ -1429,17 +1439,8 @@ static int cpuset_can_attach(struct cgroup_subsys_state *css,
goto out_unlock;
cgroup_taskset_for_each(task, tset) {
/*
* Kthreads which disallow setaffinity shouldn't be moved
* to a new cpuset; we don't want to change their cpu
* affinity and isolating such threads by their set of
* allowed nodes is unnecessary. Thus, cpusets are not
* applicable for such threads. This prevents checking for
* success of set_cpus_allowed_ptr() on all attached tasks
* before cpus_allowed may be changed.
*/
ret = -EINVAL;
if (task->flags & PF_NO_SETAFFINITY)
ret = task_can_attach(task, cs->cpus_allowed);
if (ret)
goto out_unlock;
ret = security_task_setscheduler(task);
if (ret)
......
......@@ -997,6 +997,8 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
get_task_struct(p);
read_unlock(&tasklist_lock);
sched_annotate_sleep();
if ((exit_code & 0x7f) == 0) {
why = CLD_EXITED;
status = exit_code >> 8;
......@@ -1079,6 +1081,7 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
* thread can reap it because we set its state to DEAD/TRACE.
*/
read_unlock(&tasklist_lock);
sched_annotate_sleep();
retval = wo->wo_rusage
? getrusage(p, RUSAGE_BOTH, wo->wo_rusage) : 0;
......@@ -1210,6 +1213,7 @@ static int wait_task_stopped(struct wait_opts *wo,
pid = task_pid_vnr(p);
why = ptrace ? CLD_TRAPPED : CLD_STOPPED;
read_unlock(&tasklist_lock);
sched_annotate_sleep();
if (unlikely(wo->wo_flags & WNOWAIT))
return wait_noreap_copyout(wo, p, pid, uid, why, exit_code);
......@@ -1272,6 +1276,7 @@ static int wait_task_continued(struct wait_opts *wo, struct task_struct *p)
pid = task_pid_vnr(p);
get_task_struct(p);
read_unlock(&tasklist_lock);
sched_annotate_sleep();
if (!wo->wo_info) {
retval = wo->wo_rusage
......
......@@ -378,8 +378,14 @@ static bool mutex_optimistic_spin(struct mutex *lock,
* reschedule now, before we try-lock the mutex. This avoids getting
* scheduled out right after we obtained the mutex.
*/
if (need_resched())
if (need_resched()) {
/*
* We _should_ have TASK_RUNNING here, but just in case
* we do not, make it so, otherwise we might get stuck.
*/
__set_current_state(TASK_RUNNING);
schedule_preempt_disabled();
}
return false;
}
......
......@@ -3096,6 +3096,32 @@ static int may_init_module(void)
return 0;
}
/*
* Can't use wait_event_interruptible() because our condition
* 'finished_loading()' contains a blocking primitive itself (mutex_lock).
*/
static int wait_finished_loading(struct module *mod)
{
DEFINE_WAIT_FUNC(wait, woken_wake_function);
int ret = 0;
add_wait_queue(&module_wq, &wait);
for (;;) {
if (finished_loading(mod->name))
break;
if (signal_pending(current)) {
ret = -ERESTARTSYS;
break;
}
wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
}
remove_wait_queue(&module_wq, &wait);
return ret;
}
/*
* We try to place it in the list now to make sure it's unique before
* we dedicate too many resources. In particular, temporary percpu
......@@ -3116,8 +3142,8 @@ static int add_unformed_module(struct module *mod)
|| old->state == MODULE_STATE_UNFORMED) {
/* Wait in case it fails to load. */
mutex_unlock(&module_mutex);
err = wait_event_interruptible(module_wq,
finished_loading(mod->name));
err = wait_finished_loading(mod);
if (err)
goto out_unlocked;
goto again;
......
......@@ -148,7 +148,7 @@ EXPORT_SYMBOL(wait_for_completion_timeout);
*
* This waits to be signaled for completion of a specific task. It is NOT
* interruptible and there is no timeout. The caller is accounted as waiting
* for IO.
* for IO (which traditionally means blkio only).
*/
void __sched wait_for_completion_io(struct completion *x)
{
......@@ -163,7 +163,8 @@ EXPORT_SYMBOL(wait_for_completion_io);
*
* This waits for either a completion of a specific task to be signaled or for a
* specified timeout to expire. The timeout is in jiffies. It is not
* interruptible. The caller is accounted as waiting for IO.
* interruptible. The caller is accounted as waiting for IO (which traditionally
* means blkio only).
*
* Return: 0 if timed out, and positive (at least 1, or number of jiffies left
* till timeout) if completed.
......
......@@ -25,9 +25,6 @@ int cpudl_find(struct cpudl *cp, struct task_struct *p,
void cpudl_set(struct cpudl *cp, int cpu, u64 dl, int is_valid);
int cpudl_init(struct cpudl *cp);
void cpudl_cleanup(struct cpudl *cp);
#else
#define cpudl_set(cp, cpu, dl) do { } while (0)
#define cpudl_init() do { } while (0)
#endif /* CONFIG_SMP */
#endif /* _LINUX_CPUDL_H */
......@@ -26,9 +26,6 @@ int cpupri_find(struct cpupri *cp,
void cpupri_set(struct cpupri *cp, int cpu, int pri);
int cpupri_init(struct cpupri *cp);
void cpupri_cleanup(struct cpupri *cp);
#else
#define cpupri_set(cp, cpu, pri) do { } while (0)
#define cpupri_init() do { } while (0)
#endif
#endif /* _LINUX_CPUPRI_H */
......@@ -563,11 +563,6 @@ void init_dl_task_timer(struct sched_dl_entity *dl_se)
{
struct hrtimer *timer = &dl_se->dl_timer;
if (hrtimer_active(timer)) {
hrtimer_try_to_cancel(timer);
return;
}
hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
timer->function = dl_task_timer;
}
......@@ -633,7 +628,7 @@ static void update_curr_dl(struct rq *rq)
sched_rt_avg_update(rq, delta_exec);
dl_se->runtime -= delta_exec;
dl_se->runtime -= dl_se->dl_yielded ? 0 : delta_exec;
if (dl_runtime_exceeded(rq, dl_se)) {
__dequeue_task_dl(rq, curr, 0);
if (likely(start_dl_timer(dl_se, curr->dl.dl_boosted)))
......@@ -933,7 +928,7 @@ select_task_rq_dl(struct task_struct *p, int cpu, int sd_flag, int flags)
struct task_struct *curr;
struct rq *rq;
if (sd_flag != SD_BALANCE_WAKE && sd_flag != SD_BALANCE_FORK)
if (sd_flag != SD_BALANCE_WAKE)
goto out;
rq = cpu_rq(cpu);
......@@ -1018,6 +1013,10 @@ static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
{
hrtick_start(rq, p->dl.runtime);
}
#else /* !CONFIG_SCHED_HRTICK */
static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
{
}
#endif
static struct sched_dl_entity *pick_next_dl_entity(struct rq *rq,
......@@ -1071,10 +1070,8 @@ struct task_struct *pick_next_task_dl(struct rq *rq, struct task_struct *prev)
/* Running task will never be pushed. */
dequeue_pushable_dl_task(rq, p);
#ifdef CONFIG_SCHED_HRTICK
if (hrtick_enabled(rq))
start_hrtick_dl(rq, p);
#endif
set_post_schedule(rq);
......@@ -1093,10 +1090,8 @@ static void task_tick_dl(struct rq *rq, struct task_struct *p, int queued)
{
update_curr_dl(rq);
#ifdef CONFIG_SCHED_HRTICK
if (hrtick_enabled(rq) && queued && p->dl.runtime > 0)
start_hrtick_dl(rq, p);
#endif
}
static void task_fork_dl(struct task_struct *p)
......@@ -1333,6 +1328,7 @@ static int push_dl_task(struct rq *rq)
{
struct task_struct *next_task;
struct rq *later_rq;
int ret = 0;
if (!rq->dl.overloaded)
return 0;
......@@ -1378,7 +1374,6 @@ static int push_dl_task(struct rq *rq)
* The task is still there. We don't try
* again, some other cpu will pull it when ready.
*/
dequeue_pushable_dl_task(rq, next_task);
goto out;
}
......@@ -1394,6 +1389,7 @@ static int push_dl_task(struct rq *rq)
deactivate_task(rq, next_task, 0);
set_task_cpu(next_task, later_rq->cpu);
activate_task(later_rq, next_task, 0);
ret = 1;
resched_curr(later_rq);
......@@ -1402,7 +1398,7 @@ static int push_dl_task(struct rq *rq)
out:
put_task_struct(next_task);
return 1;
return ret;
}
static void push_dl_tasks(struct rq *rq)
......@@ -1508,7 +1504,7 @@ static void task_woken_dl(struct rq *rq, struct task_struct *p)
p->nr_cpus_allowed > 1 &&
dl_task(rq->curr) &&
(rq->curr->nr_cpus_allowed < 2 ||
dl_entity_preempt(&rq->curr->dl, &p->dl))) {
!dl_entity_preempt(&p->dl, &rq->curr->dl))) {
push_dl_tasks(rq);
}
}
......@@ -1517,10 +1513,33 @@ static void set_cpus_allowed_dl(struct task_struct *p,
const struct cpumask *new_mask)
{
struct rq *rq;
struct root_domain *src_rd;
int weight;
BUG_ON(!dl_task(p));
rq = task_rq(p);
src_rd = rq->rd;
/*
* Migrating a SCHED_DEADLINE task between exclusive
* cpusets (different root_domains) entails a bandwidth
* update. We already made space for us in the destination
* domain (see cpuset_can_attach()).
*/
if (!cpumask_intersects(src_rd->span, new_mask)) {
struct dl_bw *src_dl_b;
src_dl_b = dl_bw_of(cpu_of(rq));
/*
* We now free resources of the root_domain we are migrating
* off. In the worst case, sched_setattr() may temporarily fail
* until we complete the update.
*/
raw_spin_lock(&src_dl_b->lock);
__dl_clear(src_dl_b, p->dl.dl_bw);
raw_spin_unlock(&src_dl_b->lock);
}
/*
* Update only if the task is actually running (i.e.,
* it is on the rq AND it is not throttled).
......@@ -1537,8 +1556,6 @@ static void set_cpus_allowed_dl(struct task_struct *p,
if ((p->nr_cpus_allowed > 1) == (weight > 1))
return;
rq = task_rq(p);
/*
* The process used to be able to migrate OR it can now migrate
*/
......@@ -1586,22 +1603,48 @@ void init_sched_dl_class(void)
#endif /* CONFIG_SMP */
/*
* Ensure p's dl_timer is cancelled. May drop rq->lock for a while.
*/
static void cancel_dl_timer(struct rq *rq, struct task_struct *p)
{
struct hrtimer *dl_timer = &p->dl.dl_timer;
/* Nobody will change task's class if pi_lock is held */
lockdep_assert_held(&p->pi_lock);
if (hrtimer_active(dl_timer)) {
int ret = hrtimer_try_to_cancel(dl_timer);
if (unlikely(ret == -1)) {
/*
* Note, p may migrate OR new deadline tasks
* may appear in rq when we are unlocking it.
* A caller of us must be fine with that.
*/
raw_spin_unlock(&rq->lock);
hrtimer_cancel(dl_timer);
raw_spin_lock(&rq->lock);
}
}
}
static void switched_from_dl(struct rq *rq, struct task_struct *p)
{
if (hrtimer_active(&p->dl.dl_timer) && !dl_policy(p->policy))
hrtimer_try_to_cancel(&p->dl.dl_timer);
cancel_dl_timer(rq, p);
__dl_clear_params(p);
#ifdef CONFIG_SMP
/*
* Since this might be the only -deadline task on the rq,
* this is the right place to try to pull some other one
* from an overloaded cpu, if any.
*/
if (!rq->dl.dl_nr_running)
pull_dl_task(rq);
#endif
if (!task_on_rq_queued(p) || rq->dl.dl_nr_running)
return;
if (pull_dl_task(rq))
resched_curr(rq);
}
/*
......@@ -1622,7 +1665,8 @@ static void switched_to_dl(struct rq *rq, struct task_struct *p)
if (task_on_rq_queued(p) && rq->curr != p) {
#ifdef CONFIG_SMP
if (rq->dl.overloaded && push_dl_task(rq) && rq != task_rq(p))
if (p->nr_cpus_allowed > 1 && rq->dl.overloaded &&
push_dl_task(rq) && rq != task_rq(p))
/* Only reschedule if pushing failed */
check_resched = 0;
#endif /* CONFIG_SMP */
......@@ -1704,3 +1748,12 @@ const struct sched_class dl_sched_class = {
.update_curr = update_curr_dl,
};
#ifdef CONFIG_SCHED_DEBUG
extern void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq);
void print_dl_stats(struct seq_file *m, int cpu)
{
print_dl_rq(m, cpu, &cpu_rq(cpu)->dl);
}
#endif /* CONFIG_SCHED_DEBUG */
......@@ -261,6 +261,12 @@ void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
#undef P
}
void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq)
{
SEQ_printf(m, "\ndl_rq[%d]:\n", cpu);
SEQ_printf(m, " .%-30s: %ld\n", "dl_nr_running", dl_rq->dl_nr_running);
}
extern __read_mostly int sched_clock_running;
static void print_cpu(struct seq_file *m, int cpu)
......@@ -329,6 +335,7 @@ do { \
spin_lock_irqsave(&sched_debug_lock, flags);
print_cfs_stats(m, cpu);
print_rt_stats(m, cpu);
print_dl_stats(m, cpu);
print_rq(m, rq, cpu);
spin_unlock_irqrestore(&sched_debug_lock, flags);
......@@ -528,8 +535,8 @@ static void sched_show_numa(struct task_struct *p, struct seq_file *m)
unsigned long nr_faults = -1;
int cpu_current, home_node;
if (p->numa_faults_memory)
nr_faults = p->numa_faults_memory[2*node + i];
if (p->numa_faults)
nr_faults = p->numa_faults[2*node + i];
cpu_current = !i ? (task_node(p) == node) :
(pol && node_isset(node, pol->v.nodes));
......
......@@ -1301,9 +1301,6 @@ select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
struct task_struct *curr;
struct rq *rq;
if (p->nr_cpus_allowed == 1)
goto out;
/* For anything but wake ups, just return the task_cpu */
if (sd_flag != SD_BALANCE_WAKE && sd_flag != SD_BALANCE_FORK)
goto out;
......@@ -1351,16 +1348,22 @@ select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
{
if (rq->curr->nr_cpus_allowed == 1)
/*
* Current can't be migrated, useless to reschedule,
* let's hope p can move out.
*/
if (rq->curr->nr_cpus_allowed == 1 ||
!cpupri_find(&rq->rd->cpupri, rq->curr, NULL))
return;
/*
* p is migratable, so let's not schedule it and
* see if it is pushed or pulled somewhere else.
*/
if (p->nr_cpus_allowed != 1
&& cpupri_find(&rq->rd->cpupri, p, NULL))
return;
if (!cpupri_find(&rq->rd->cpupri, rq->curr, NULL))
return;
/*
* There appear to be other cpus that can accept
* current and none to run 'p', so let's reschedule
......
......@@ -176,6 +176,25 @@ struct dl_bw {
u64 bw, total_bw;
};
static inline
void __dl_clear(struct dl_bw *dl_b, u64 tsk_bw)
{
dl_b->total_bw -= tsk_bw;
}
static inline
void __dl_add(struct dl_bw *dl_b, u64 tsk_bw)
{
dl_b->total_bw += tsk_bw;
}
static inline
bool __dl_overflow(struct dl_bw *dl_b, int cpus, u64 old_bw, u64 new_bw)
{
return dl_b->bw != -1 &&
dl_b->bw * cpus < dl_b->total_bw - old_bw + new_bw;
}
extern struct mutex sched_domains_mutex;
#ifdef CONFIG_CGROUP_SCHED
......@@ -678,7 +697,25 @@ static inline u64 rq_clock_task(struct rq *rq)
return rq->clock_task;
}
#ifdef CONFIG_NUMA
enum numa_topology_type {
NUMA_DIRECT,
NUMA_GLUELESS_MESH,
NUMA_BACKPLANE,
};
extern enum numa_topology_type sched_numa_topology_type;
extern int sched_max_numa_distance;
extern bool find_numa_distance(int distance);
#endif
#ifdef CONFIG_NUMA_BALANCING
/* The regions in numa_faults array from task_struct */
enum numa_faults_stats {
NUMA_MEM = 0,
NUMA_CPU,
NUMA_MEMBUF,
NUMA_CPUBUF
};
extern void sched_setnuma(struct task_struct *p, int node);
extern int migrate_task_to(struct task_struct *p, int cpu);
extern int migrate_swap(struct task_struct *, struct task_struct *);
......@@ -1127,6 +1164,11 @@ struct sched_class {
void (*task_fork) (struct task_struct *p);
void (*task_dead) (struct task_struct *p);
/*
* The switched_from() call is allowed to drop rq->lock, therefore we
* cannot assume the switched_from/switched_to pair is serialized by
* rq->lock. They are however serialized by p->pi_lock.
*/
void (*switched_from) (struct rq *this_rq, struct task_struct *task);
void (*switched_to) (struct rq *this_rq, struct task_struct *task);
void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
......@@ -1504,6 +1546,7 @@ extern struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq);
extern struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq);
extern void print_cfs_stats(struct seq_file *m, int cpu);
extern void print_rt_stats(struct seq_file *m, int cpu);
extern void print_dl_stats(struct seq_file *m, int cpu);
extern void init_cfs_rq(struct cfs_rq *cfs_rq);
extern void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq);
......
......@@ -9,6 +9,7 @@
#include <linux/mm.h>
#include <linux/wait.h>
#include <linux/hash.h>
#include <linux/kthread.h>
void __init_waitqueue_head(wait_queue_head_t *q, const char *name, struct lock_class_key *key)
{
......@@ -297,6 +298,71 @@ int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *
}
EXPORT_SYMBOL(autoremove_wake_function);
static inline bool is_kthread_should_stop(void)
{
return (current->flags & PF_KTHREAD) && kthread_should_stop();
}
/*
* DEFINE_WAIT_FUNC(wait, woken_wake_func);
*
* add_wait_queue(&wq, &wait);
* for (;;) {
* if (condition)
* break;
*
* p->state = mode; condition = true;
* smp_mb(); // A smp_wmb(); // C
* if (!wait->flags & WQ_FLAG_WOKEN) wait->flags |= WQ_FLAG_WOKEN;
* schedule() try_to_wake_up();
* p->state = TASK_RUNNING; ~~~~~~~~~~~~~~~~~~
* wait->flags &= ~WQ_FLAG_WOKEN; condition = true;
* smp_mb() // B smp_wmb(); // C
* wait->flags |= WQ_FLAG_WOKEN;
* }
* remove_wait_queue(&wq, &wait);
*
*/
long wait_woken(wait_queue_t *wait, unsigned mode, long timeout)
{
set_current_state(mode); /* A */
/*
* The above implies an smp_mb(), which matches with the smp_wmb() from
* woken_wake_function() such that if we observe WQ_FLAG_WOKEN we must
* also observe all state before the wakeup.
*/
if (!(wait->flags & WQ_FLAG_WOKEN) && !is_kthread_should_stop())
timeout = schedule_timeout(timeout);
__set_current_state(TASK_RUNNING);
/*
* The below implies an smp_mb(), it too pairs with the smp_wmb() from
* woken_wake_function() such that we must either observe the wait
* condition being true _OR_ WQ_FLAG_WOKEN such that we will not miss
* an event.
*/
set_mb(wait->flags, wait->flags & ~WQ_FLAG_WOKEN); /* B */
return timeout;
}
EXPORT_SYMBOL(wait_woken);
int woken_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
/*
* Although this function is called under waitqueue lock, LOCK
* doesn't imply a write barrier and the users expect write
* barrier semantics on wakeup functions. The following
* smp_wmb() is equivalent to smp_wmb() in try_to_wake_up()
* and is paired with set_mb() in wait_woken().
*/
smp_wmb(); /* C */
wait->flags |= WQ_FLAG_WOKEN;
return default_wake_function(wait, mode, sync, key);
}
EXPORT_SYMBOL(woken_wake_function);
int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *arg)
{
struct wait_bit_key *key = arg;
......
......@@ -110,7 +110,7 @@ static int smpboot_thread_fn(void *data)
set_current_state(TASK_INTERRUPTIBLE);
preempt_disable();
if (kthread_should_stop()) {
set_current_state(TASK_RUNNING);
__set_current_state(TASK_RUNNING);
preempt_enable();
if (ht->cleanup)
ht->cleanup(td->cpu, cpu_online(td->cpu));
......@@ -136,26 +136,27 @@ static int smpboot_thread_fn(void *data)
/* Check for state change setup */
switch (td->status) {
case HP_THREAD_NONE:
__set_current_state(TASK_RUNNING);
preempt_enable();
if (ht->setup)
ht->setup(td->cpu);
td->status = HP_THREAD_ACTIVE;
preempt_disable();
break;
continue;
case HP_THREAD_PARKED:
__set_current_state(TASK_RUNNING);
preempt_enable();
if (ht->unpark)
ht->unpark(td->cpu);
td->status = HP_THREAD_ACTIVE;
preempt_disable();
break;
continue;
}
if (!ht->thread_should_run(td->cpu)) {
preempt_enable();
preempt_enable_no_resched();
schedule();
} else {
set_current_state(TASK_RUNNING);
__set_current_state(TASK_RUNNING);
preempt_enable();
ht->thread_fn(td->cpu);
}
......
......@@ -101,11 +101,11 @@ static struct rfcomm_session *rfcomm_session_del(struct rfcomm_session *s);
#define __get_rpn_stop_bits(line) (((line) >> 2) & 0x1)
#define __get_rpn_parity(line) (((line) >> 3) & 0x7)
static DECLARE_WAIT_QUEUE_HEAD(rfcomm_wq);
static void rfcomm_schedule(void)
{
if (!rfcomm_thread)
return;
wake_up_process(rfcomm_thread);
wake_up_all(&rfcomm_wq);
}
/* ---- RFCOMM FCS computation ---- */
......@@ -2086,24 +2086,22 @@ static void rfcomm_kill_listener(void)
static int rfcomm_run(void *unused)
{
DEFINE_WAIT_FUNC(wait, woken_wake_function);
BT_DBG("");
set_user_nice(current, -10);
rfcomm_add_listener(BDADDR_ANY);
while (1) {
set_current_state(TASK_INTERRUPTIBLE);
if (kthread_should_stop())
break;
add_wait_queue(&rfcomm_wq, &wait);
while (!kthread_should_stop()) {
/* Process stuff */
rfcomm_process_sessions();
schedule();
wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
}
__set_current_state(TASK_RUNNING);
remove_wait_queue(&rfcomm_wq, &wait);
rfcomm_kill_listener();
......
......@@ -7200,11 +7200,10 @@ static void __net_exit rtnl_lock_unregistering(struct list_head *net_list)
*/
struct net *net;
bool unregistering;
DEFINE_WAIT(wait);
DEFINE_WAIT_FUNC(wait, woken_wake_function);
add_wait_queue(&netdev_unregistering_wq, &wait);
for (;;) {
prepare_to_wait(&netdev_unregistering_wq, &wait,
TASK_UNINTERRUPTIBLE);
unregistering = false;
rtnl_lock();
list_for_each_entry(net, net_list, exit_list) {
......@@ -7216,9 +7215,10 @@ static void __net_exit rtnl_lock_unregistering(struct list_head *net_list)
if (!unregistering)
break;
__rtnl_unlock();
schedule();
wait_woken(&wait, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
}
finish_wait(&netdev_unregistering_wq, &wait);
remove_wait_queue(&netdev_unregistering_wq, &wait);
}
static void __net_exit default_device_exit_batch(struct list_head *net_list)
......
......@@ -365,11 +365,10 @@ static void rtnl_lock_unregistering_all(void)
{
struct net *net;
bool unregistering;
DEFINE_WAIT(wait);
DEFINE_WAIT_FUNC(wait, woken_wake_function);
add_wait_queue(&netdev_unregistering_wq, &wait);
for (;;) {
prepare_to_wait(&netdev_unregistering_wq, &wait,
TASK_UNINTERRUPTIBLE);
unregistering = false;
rtnl_lock();
for_each_net(net) {
......@@ -381,9 +380,10 @@ static void rtnl_lock_unregistering_all(void)
if (!unregistering)
break;
__rtnl_unlock();
schedule();
wait_woken(&wait, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
}
finish_wait(&netdev_unregistering_wq, &wait);
remove_wait_queue(&netdev_unregistering_wq, &wait);
}
/**
......