Commit 3c25a55d authored by Lai Jiangshan, committed by Tejun Heo

workqueue: rename wq->flush_mutex to wq->mutex

Currently wq->flush_mutex protects many fields of a workqueue
including, especially, the pwqs list.  We're going to expand this
mutex to protect most of a workqueue and eventually replace pwq_lock,
which will make locking simpler and easier to understand.

Drop the "flush_" prefix in preparation.

This patch is a pure rename.

tj: Rebased on top of the current dev branch.  Updated description.
    Use WQ: and WR: instead of Q: and QR: for synchronization labels.
Signed-off-by: Lai Jiangshan <laijs@cn.fujitsu.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
parent 68e13a67
@@ -118,8 +118,6 @@ enum {
  *    cpu or grabbing pool->lock is enough for read access.  If
  *    POOL_DISASSOCIATED is set, it's identical to L.
  *
- * F: wq->flush_mutex protected.
- *
  * MG: pool->manager_mutex and pool->lock protected.  Writes require both
  *     locks.  Reads can happen under either lock.
  *
@@ -129,8 +127,10 @@
  *
  * PW: pwq_lock protected.
  *
- * FR: wq->flush_mutex and pwq_lock protected for writes.  Sched-RCU
- *     protected for reads.
+ * WQ: wq->mutex protected.
+ *
+ * WR: wq->mutex and pwq_lock protected for writes.  Sched-RCU protected
+ *     for reads.
  *
  * MD: wq_mayday_lock protected.
  */
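The WR rule is the subtle one: writers must hold both wq->mutex and
pwq_lock, while readers may walk wq->pwqs under sched-RCU alone.  A
minimal sketch of that discipline, assuming the usual <linux/rculist.h>
helpers; the example_* functions are hypothetical and not part of this
patch:

/* Writer side: both wq->mutex and pwq_lock must be held to modify. */
static void example_link_pwq(struct workqueue_struct *wq,
			     struct pool_workqueue *pwq)
{
	mutex_lock(&wq->mutex);		/* WQ/WR: outer mutex */
	spin_lock_irq(&pwq_lock);	/* WR: inner spinlock */
	list_add_rcu(&pwq->pwqs_node, &wq->pwqs);
	spin_unlock_irq(&pwq_lock);
	mutex_unlock(&wq->mutex);
}

/* Reader side: sched-RCU alone suffices; no locks are taken. */
static int example_count_pwqs(struct workqueue_struct *wq)
{
	struct pool_workqueue *pwq;
	int n = 0;

	rcu_read_lock_sched();
	list_for_each_entry_rcu(pwq, &wq->pwqs, pwqs_node)
		n++;
	rcu_read_unlock_sched();
	return n;
}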
@@ -197,7 +197,7 @@ struct pool_workqueue {
 	int			nr_active;	/* L: nr of active works */
 	int			max_active;	/* L: max active works */
 	struct list_head	delayed_works;	/* L: delayed works */
-	struct list_head	pwqs_node;	/* FR: node on wq->pwqs */
+	struct list_head	pwqs_node;	/* WR: node on wq->pwqs */
 	struct list_head	mayday_node;	/* MD: node on wq->maydays */
 
 	/*
@@ -214,8 +214,8 @@ struct pool_workqueue {
  * Structure used to wait for workqueue flush.
  */
 struct wq_flusher {
-	struct list_head	list;		/* F: list of flushers */
-	int			flush_color;	/* F: flush color waiting for */
+	struct list_head	list;		/* WQ: list of flushers */
+	int			flush_color;	/* WQ: flush color waiting for */
 	struct completion	done;		/* flush completion */
 };
@@ -228,16 +228,16 @@ struct wq_device;
 struct workqueue_struct {
 	unsigned int		flags;		/* PL: WQ_* flags */
 	struct pool_workqueue __percpu *cpu_pwqs; /* I: per-cpu pwq's */
-	struct list_head	pwqs;		/* FR: all pwqs of this wq */
+	struct list_head	pwqs;		/* WR: all pwqs of this wq */
 	struct list_head	list;		/* PL: list of all workqueues */
 
-	struct mutex		flush_mutex;	/* protects wq flushing */
-	int			work_color;	/* F: current work color */
-	int			flush_color;	/* F: current flush color */
+	struct mutex		mutex;		/* protects this wq */
+	int			work_color;	/* WQ: current work color */
+	int			flush_color;	/* WQ: current flush color */
 	atomic_t		nr_pwqs_to_flush; /* flush in progress */
-	struct wq_flusher	*first_flusher;	/* F: first flusher */
-	struct list_head	flusher_queue;	/* F: flush waiters */
-	struct list_head	flusher_overflow; /* F: flush overflow list */
+	struct wq_flusher	*first_flusher;	/* WQ: first flusher */
+	struct list_head	flusher_queue;	/* WQ: flush waiters */
+	struct list_head	flusher_overflow; /* WQ: flush overflow list */
 
 	struct list_head	maydays;	/* MD: pwqs requesting rescue */
 	struct worker		*rescuer;	/* I: rescue worker */
@@ -2460,7 +2460,7 @@ static void insert_wq_barrier(struct pool_workqueue *pwq,
  * advanced to @work_color.
  *
  * CONTEXT:
- * mutex_lock(wq->flush_mutex).
+ * mutex_lock(wq->mutex).
  *
  * RETURNS:
  * %true if @flush_color >= 0 and there's something to flush.  %false
@@ -2529,7 +2529,7 @@ void flush_workqueue(struct workqueue_struct *wq)
 	lock_map_acquire(&wq->lockdep_map);
 	lock_map_release(&wq->lockdep_map);
 
-	mutex_lock(&wq->flush_mutex);
+	mutex_lock(&wq->mutex);
 
 	/*
 	 * Start-to-wait phase
@@ -2574,7 +2574,7 @@ void flush_workqueue(struct workqueue_struct *wq)
 		list_add_tail(&this_flusher.list, &wq->flusher_overflow);
 	}
 
-	mutex_unlock(&wq->flush_mutex);
+	mutex_unlock(&wq->mutex);
 
 	wait_for_completion(&this_flusher.done);
@@ -2587,7 +2587,7 @@ void flush_workqueue(struct workqueue_struct *wq)
 	if (wq->first_flusher != &this_flusher)
 		return;
 
-	mutex_lock(&wq->flush_mutex);
+	mutex_lock(&wq->mutex);
 
 	/* we might have raced, check again with mutex held */
 	if (wq->first_flusher != &this_flusher)
@@ -2659,7 +2659,7 @@ void flush_workqueue(struct workqueue_struct *wq)
 	}
 
 out_unlock:
-	mutex_unlock(&wq->flush_mutex);
+	mutex_unlock(&wq->mutex);
 }
 EXPORT_SYMBOL_GPL(flush_workqueue);
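The hunks above also show the shape of the flusher protocol: each
flusher lives on its caller's stack, is queued under wq->mutex, and then
sleeps on a completion with the mutex dropped.  A condensed sketch of
just that wait pattern, with the real flush-color bookkeeping elided;
example_wait_for_flush() is hypothetical:

static void example_wait_for_flush(struct workqueue_struct *wq)
{
	struct wq_flusher this_flusher = {
		.list = LIST_HEAD_INIT(this_flusher.list),
		.flush_color = -1,
		.done = COMPLETION_INITIALIZER_ONSTACK(this_flusher.done),
	};

	mutex_lock(&wq->mutex);
	/* flush_workqueue() claims a flush color here; we only queue */
	list_add_tail(&this_flusher.list, &wq->flusher_queue);
	mutex_unlock(&wq->mutex);

	/* whoever retires our flush color calls complete() on ->done */
	wait_for_completion(&this_flusher.done);
}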
@@ -3550,15 +3550,15 @@ static void pwq_unbound_release_workfn(struct work_struct *work)
 		return;
 
 	/*
-	 * Unlink @pwq.  Synchronization against flush_mutex isn't strictly
+	 * Unlink @pwq.  Synchronization against wq->mutex isn't strictly
 	 * necessary on release but do it anyway.  It's easier to verify
 	 * and consistent with the linking path.
 	 */
-	mutex_lock(&wq->flush_mutex);
+	mutex_lock(&wq->mutex);
 	spin_lock_irq(&pwq_lock);
 	list_del_rcu(&pwq->pwqs_node);
 	spin_unlock_irq(&pwq_lock);
-	mutex_unlock(&wq->flush_mutex);
+	mutex_unlock(&wq->mutex);
 
 	put_unbound_pool(pool);
 	call_rcu_sched(&pwq->rcu, rcu_free_pwq);
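Note why the release path pairs list_del_rcu() with call_rcu_sched():
WR-annotated readers traverse wq->pwqs under sched-RCU, so the pwq may
only be freed after a sched-RCU grace period has elapsed.  A sketch of
what the rcu_free_pwq() callback referenced above plausibly does; the
pwq_cache slab name is an assumption here:

static void rcu_free_pwq(struct rcu_head *rcu)
{
	/* runs after a sched-RCU grace period; lockless readers are done */
	kmem_cache_free(pwq_cache,
			container_of(rcu, struct pool_workqueue, rcu));
}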
@@ -3627,12 +3627,12 @@ static void init_and_link_pwq(struct pool_workqueue *pwq,
 	INIT_LIST_HEAD(&pwq->mayday_node);
 	INIT_WORK(&pwq->unbound_release_work, pwq_unbound_release_workfn);
 
-	mutex_lock(&wq->flush_mutex);
+	mutex_lock(&wq->mutex);
 	spin_lock_irq(&pwq_lock);
 
 	/*
 	 * Set the matching work_color.  This is synchronized with
-	 * flush_mutex to avoid confusing flush_workqueue().
+	 * wq->mutex to avoid confusing flush_workqueue().
 	 */
 	if (p_last_pwq)
 		*p_last_pwq = first_pwq(wq);
@@ -3645,7 +3645,7 @@ static void init_and_link_pwq(struct pool_workqueue *pwq,
 	list_add_rcu(&pwq->pwqs_node, &wq->pwqs);
 
 	spin_unlock_irq(&pwq_lock);
-	mutex_unlock(&wq->flush_mutex);
+	mutex_unlock(&wq->mutex);
 }
 
 /**
@@ -3762,7 +3762,7 @@ struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
 	/* init wq */
 	wq->flags = flags;
 	wq->saved_max_active = max_active;
-	mutex_init(&wq->flush_mutex);
+	mutex_init(&wq->mutex);
 	atomic_set(&wq->nr_pwqs_to_flush, 0);
 	INIT_LIST_HEAD(&wq->pwqs);
 	INIT_LIST_HEAD(&wq->flusher_queue);