Commit 1c270b79 authored by Tejun Heo

workqueue: Move nr_active handling into helpers

__queue_work(), pwq_dec_nr_in_flight() and wq_adjust_max_active() were
open-coding nr_active handling, which is fine given that the operations are
trivial. However, the planned unbound nr_active update will make them more
complicated, so let's move them into helpers.

- pwq_tryinc_nr_active() is added. It increments nr_active if it is under the
  max_active limit and returns a boolean indicating whether the increment was
  successful. Note that the function is structured to accommodate future
  changes. __queue_work() is updated to use the new helper.

- pwq_activate_first_inactive() is updated to use pwq_tryinc_nr_active() and
  thus no longer assumes that nr_active is under max_active. It now returns a
  boolean indicating whether a work item has been activated.

- wq_adjust_max_active() no longer tests directly whether a work item can be
  activated. Instead, it uses the return value of
  pwq_activate_first_inactive() to tell whether a work item has been
  activated.

- Decrementing nr_active and activating the first inactive work item are
  factored into pwq_dec_nr_active() (see the sketch right after this list).
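
As a rough illustration of the pattern these helpers implement, here is a
small standalone userspace model. All names in it (fake_pwq,
tryinc_nr_active, and so on) are made up for illustration; locking, the work
lists, and the rest of the workqueue machinery are deliberately left out, so
this is a sketch of the counting scheme, not kernel code:

  /* Userspace model of the nr_active pattern (hypothetical names). */
  #include <stdbool.h>
  #include <stdio.h>

  struct fake_pwq {
  	int nr_active;    /* items currently allowed to run */
  	int max_active;   /* limit on nr_active */
  	int nr_inactive;  /* stand-in for the inactive_works list */
  };

  /* like pwq_tryinc_nr_active(): take an active slot only if under the limit */
  static bool tryinc_nr_active(struct fake_pwq *pwq)
  {
  	if (pwq->nr_active >= pwq->max_active)
  		return false;
  	pwq->nr_active++;
  	return true;
  }

  /* like pwq_activate_first_inactive(): promote one deferred item if allowed */
  static bool activate_first_inactive(struct fake_pwq *pwq)
  {
  	if (pwq->nr_inactive == 0 || !tryinc_nr_active(pwq))
  		return false;
  	pwq->nr_inactive--;
  	return true;
  }

  /* like pwq_dec_nr_active(): retire a slot, then try to promote a deferred item */
  static void dec_nr_active(struct fake_pwq *pwq)
  {
  	pwq->nr_active--;
  	activate_first_inactive(pwq);
  }

  int main(void)
  {
  	struct fake_pwq pwq = { .max_active = 2, .nr_inactive = 3 };

  	/* fill the active slots; the remaining items stay inactive */
  	while (tryinc_nr_active(&pwq))
  		;
  	printf("active=%d inactive=%d\n", pwq.nr_active, pwq.nr_inactive); /* 2 3 */

  	/* retiring one active item pulls in the next inactive one */
  	dec_nr_active(&pwq);
  	printf("active=%d inactive=%d\n", pwq.nr_active, pwq.nr_inactive); /* 2 2 */
  	return 0;
  }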

v3: - WARN_ON_ONCE(!WORK_STRUCT_INACTIVE) added to __pwq_activate_work() as
      the function is now called unconditionally from
      pwq_activate_first_inactive().

v2: - wq->max_active now uses WRITE/READ_ONCE() as suggested by Lai.
Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Lai Jiangshan <jiangshanlai@gmail.com>
parent 4c638030
@@ -1468,11 +1468,14 @@ static bool pwq_is_empty(struct pool_workqueue *pwq)
 static void __pwq_activate_work(struct pool_workqueue *pwq,
 				struct work_struct *work)
 {
+	unsigned long *wdb = work_data_bits(work);
+
+	WARN_ON_ONCE(!(*wdb & WORK_STRUCT_INACTIVE));
 	trace_workqueue_activate_work(work);
 	if (list_empty(&pwq->pool->worklist))
 		pwq->pool->watchdog_ts = jiffies;
 	move_linked_works(work, &pwq->pool->worklist, NULL);
-	__clear_bit(WORK_STRUCT_INACTIVE_BIT, work_data_bits(work));
+	__clear_bit(WORK_STRUCT_INACTIVE_BIT, wdb);
 }
 
 /**
@@ -1497,12 +1500,66 @@ static bool pwq_activate_work(struct pool_workqueue *pwq,
 	return true;
 }
 
-static void pwq_activate_first_inactive(struct pool_workqueue *pwq)
+/**
+ * pwq_tryinc_nr_active - Try to increment nr_active for a pwq
+ * @pwq: pool_workqueue of interest
+ *
+ * Try to increment nr_active for @pwq. Returns %true if an nr_active count is
+ * successfully obtained. %false otherwise.
+ */
+static bool pwq_tryinc_nr_active(struct pool_workqueue *pwq)
+{
+	struct workqueue_struct *wq = pwq->wq;
+	struct worker_pool *pool = pwq->pool;
+	bool obtained;
+
+	lockdep_assert_held(&pool->lock);
+
+	obtained = pwq->nr_active < READ_ONCE(wq->max_active);
+
+	if (obtained)
+		pwq->nr_active++;
+	return obtained;
+}
+
+/**
+ * pwq_activate_first_inactive - Activate the first inactive work item on a pwq
+ * @pwq: pool_workqueue of interest
+ *
+ * Activate the first inactive work item of @pwq if available and allowed by
+ * max_active limit.
+ *
+ * Returns %true if an inactive work item has been activated. %false if no
+ * inactive work item is found or max_active limit is reached.
+ */
+static bool pwq_activate_first_inactive(struct pool_workqueue *pwq)
 {
-	struct work_struct *work = list_first_entry(&pwq->inactive_works,
-						    struct work_struct, entry);
+	struct work_struct *work =
+		list_first_entry_or_null(&pwq->inactive_works,
+					 struct work_struct, entry);
 
-	pwq_activate_work(pwq, work);
+	if (work && pwq_tryinc_nr_active(pwq)) {
+		__pwq_activate_work(pwq, work);
+		return true;
+	} else {
+		return false;
+	}
+}
+
+/**
+ * pwq_dec_nr_active - Retire an active count
+ * @pwq: pool_workqueue of interest
+ *
+ * Decrement @pwq's nr_active and try to activate the first inactive work item.
+ */
+static void pwq_dec_nr_active(struct pool_workqueue *pwq)
+{
+	struct worker_pool *pool = pwq->pool;
+
+	lockdep_assert_held(&pool->lock);
+
+	pwq->nr_active--;
+	pwq_activate_first_inactive(pwq);
 }
 
 /**
@@ -1520,14 +1577,8 @@ static void pwq_dec_nr_in_flight(struct pool_workqueue *pwq, unsigned long work_
 {
 	int color = get_work_color(work_data);
 
-	if (!(work_data & WORK_STRUCT_INACTIVE)) {
-		pwq->nr_active--;
-		if (!list_empty(&pwq->inactive_works)) {
-			/* one down, submit an inactive one */
-			if (pwq->nr_active < READ_ONCE(pwq->wq->max_active))
-				pwq_activate_first_inactive(pwq);
-		}
-	}
+	if (!(work_data & WORK_STRUCT_INACTIVE))
+		pwq_dec_nr_active(pwq);
 
 	pwq->nr_in_flight[color]--;
 
@@ -1829,13 +1880,11 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
 	 * @work must also queue behind existing inactive work items to maintain
	 * ordering when max_active changes. See wq_adjust_max_active().
	 */
-	if (list_empty(&pwq->inactive_works) &&
-	    pwq->nr_active < READ_ONCE(pwq->wq->max_active)) {
+	if (list_empty(&pwq->inactive_works) && pwq_tryinc_nr_active(pwq)) {
 		if (list_empty(&pool->worklist))
 			pool->watchdog_ts = jiffies;
 
 		trace_workqueue_activate_work(work);
-		pwq->nr_active++;
 		insert_work(pwq, work, &pool->worklist, work_flags);
 		kick_pool(pool);
 	} else {
@@ -4687,9 +4736,8 @@ static void wq_adjust_max_active(struct workqueue_struct *wq)
 		/* this function can be called during early boot w/ irq disabled */
 		raw_spin_lock_irqsave(&pwq->pool->lock, flags);
 
-		while (!list_empty(&pwq->inactive_works) &&
-		       pwq->nr_active < wq->max_active)
-			pwq_activate_first_inactive(pwq);
+		while (pwq_activate_first_inactive(pwq))
+			;
 
 		kick_pool(pwq->pool);
 