Commit 1f1f642e authored by Oleg Nesterov, committed by Linus Torvalds

make cancel_xxx_work_sync() return a boolean

Change cancel_work_sync() and cancel_delayed_work_sync() to return a boolean
indicating whether the work was actually cancelled.  A zero return value means
that the work was not pending/queued.

Without that kind of change it is sometimes impossible to avoid
flush_workqueue(); see the next patch for an example.

Also, this patch unifies both functions and kills the (unlikely) busy-wait
loop.
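
For illustration only, the kind of caller this enables; a minimal sketch,
assuming a hypothetical driver with a struct my_drv holding a delayed_work
member "poll_work" and a device pointer "dev" (none of these names are from
this patch):

	/*
	 * Hypothetical caller: the driver took a device reference when it
	 * queued the work. With the new return value it can drop that
	 * reference exactly when the cancelled work was still pending (and
	 * thus will never run), instead of calling flush_workqueue() just
	 * to be safe.
	 */
	static void my_drv_stop_polling(struct my_drv *priv)
	{
		if (cancel_delayed_work_sync(&priv->poll_work))
			put_device(priv->dev);	/* the work will never run */
	}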
Signed-off-by: Oleg Nesterov <oleg@tv-sign.ru>
Acked-by: Jarek Poplawski <jarkao2@o2.pl>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent f5a421a4
include/linux/workqueue.h
@@ -148,7 +148,7 @@ extern int keventd_up(void);
 extern void init_workqueues(void);
 int execute_in_process_context(work_func_t fn, struct execute_work *);
 
-extern void cancel_work_sync(struct work_struct *work);
+extern int cancel_work_sync(struct work_struct *work);
 
 /*
  * Kill off a pending schedule_delayed_work(). Note that the work callback
@@ -166,7 +166,7 @@ static inline int cancel_delayed_work(struct delayed_work *work)
 	return ret;
 }
 
-extern void cancel_delayed_work_sync(struct delayed_work *work);
+extern int cancel_delayed_work_sync(struct delayed_work *work);
 
 /* Obsolete. use cancel_delayed_work_sync() */
 static inline
kernel/workqueue.c
@@ -382,16 +382,16 @@ void fastcall flush_workqueue(struct workqueue_struct *wq)
 EXPORT_SYMBOL_GPL(flush_workqueue);
 
 /*
- * Upon a successful return, the caller "owns" WORK_STRUCT_PENDING bit,
+ * Upon a successful return (>= 0), the caller "owns" WORK_STRUCT_PENDING bit,
  * so this work can't be re-armed in any way.
  */
 static int try_to_grab_pending(struct work_struct *work)
 {
 	struct cpu_workqueue_struct *cwq;
-	int ret = 0;
+	int ret = -1;
 
 	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work)))
-		return 1;
+		return 0;
 
 	/*
 	 * The queueing is in progress, or it is already queued. Try to
@@ -457,10 +457,28 @@ static void wait_on_work(struct work_struct *work)
 		wait_on_cpu_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
 }
 
+static int __cancel_work_timer(struct work_struct *work,
+				struct timer_list* timer)
+{
+	int ret;
+
+	do {
+		ret = (timer && likely(del_timer(timer)));
+		if (!ret)
+			ret = try_to_grab_pending(work);
+		wait_on_work(work);
+	} while (unlikely(ret < 0));
+
+	work_clear_pending(work);
+	return ret;
+}
+
 /**
  * cancel_work_sync - block until a work_struct's callback has terminated
  * @work: the work which is to be flushed
  *
+ * Returns true if @work was pending.
+ *
  * cancel_work_sync() will cancel the work if it is queued. If the work's
  * callback appears to be running, cancel_work_sync() will block until it
  * has completed.
@@ -476,12 +494,9 @@ static void wait_on_work(struct work_struct *work)
  * The caller must ensure that workqueue_struct on which this work was last
  * queued can't be destroyed before this function returns.
  */
-void cancel_work_sync(struct work_struct *work)
+int cancel_work_sync(struct work_struct *work)
 {
-	while (!try_to_grab_pending(work))
-		cpu_relax();
-	wait_on_work(work);
-	work_clear_pending(work);
+	return __cancel_work_timer(work, NULL);
 }
 EXPORT_SYMBOL_GPL(cancel_work_sync);
 
@@ -489,16 +504,14 @@ EXPORT_SYMBOL_GPL(cancel_work_sync);
  * cancel_delayed_work_sync - reliably kill off a delayed work.
  * @dwork: the delayed work struct
  *
+ * Returns true if @dwork was pending.
+ *
  * It is possible to use this function if @dwork rearms itself via queue_work()
  * or queue_delayed_work(). See also the comment for cancel_work_sync().
  */
-void cancel_delayed_work_sync(struct delayed_work *dwork)
+int cancel_delayed_work_sync(struct delayed_work *dwork)
 {
-	while (!del_timer(&dwork->timer) &&
-	       !try_to_grab_pending(&dwork->work))
-		cpu_relax();
-	wait_on_work(&dwork->work);
-	work_clear_pending(&dwork->work);
+	return __cancel_work_timer(&dwork->work, &dwork->timer);
 }
 EXPORT_SYMBOL(cancel_delayed_work_sync);
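
To make the retry logic concrete, here is a restatement of the return
convention that follows from the hunks above (a reading aid, not part of
the commit):

	/*
	 * try_to_grab_pending() return values after this patch:
	 *   -1  the work could not be grabbed yet (the queueing is in
	 *       progress); the caller must retry
	 *    0  the work was idle; the caller now owns WORK_STRUCT_PENDING
	 *    1  the work was queued and has been stolen from its queue
	 *
	 * __cancel_work_timer() loops only while ret < 0; the final 0 or 1
	 * becomes the boolean "was it pending?" result, after wait_on_work()
	 * has ensured the callback is no longer running on any CPU.
	 */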