Commit 45d153c0 authored by Daniel Jordan, committed by Herbert Xu

padata: use separate workqueues for parallel and serial work

padata currently uses one per-CPU workqueue per instance for all work.

Prepare for running parallel jobs on an unbound workqueue by introducing
dedicated workqueues for parallel and serial work.
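
The shape of the change, condensed here from the hunks below into one place (a sketch only; error handling, cpumask setup, and CPU-hotplug locking are left out):

    /*
     * Sketch: how an instance gets its two workqueues after this patch,
     * condensed from the padata_alloc()/__padata_free() hunks below.
     */
    #include <linux/workqueue.h>

    /* padata_alloc(): one workqueue for parallel jobs, one for serial work */
    pinst->parallel_wq = alloc_workqueue("%s_parallel",
                                         WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE, 1, name);
    pinst->serial_wq   = alloc_workqueue("%s_serial",
                                         WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE, 1, name);

    /* padata_do_parallel(): parallel jobs now go to parallel_wq ... */
    queue_work_on(target_cpu, pinst->parallel_wq, &queue->work);

    /* ... while serial completion and reorder work go to serial_wq */
    queue_work_on(cb_cpu, pinst->serial_wq, &squeue->work);
    queue_work(pinst->serial_wq, &pd->reorder_work);

    /* __padata_free(): destroy in the reverse order of allocation */
    destroy_workqueue(pinst->serial_wq);
    destroy_workqueue(pinst->parallel_wq);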
Signed-off-by: Daniel Jordan <daniel.m.jordan@oracle.com>
Acked-by: Steffen Klassert <steffen.klassert@secunet.com>
Cc: Herbert Xu <herbert@gondor.apana.org.au>
Cc: Lai Jiangshan <jiangshanlai@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Tejun Heo <tj@kernel.org>
Cc: linux-crypto@vger.kernel.org
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent cc491d8e
--- a/include/linux/padata.h
+++ b/include/linux/padata.h
@@ -127,7 +127,8 @@ struct parallel_data {
  * struct padata_instance - The overall control structure.
  *
  * @cpu_notifier: cpu hotplug notifier.
- * @wq: The workqueue in use.
+ * @parallel_wq: The workqueue used for parallel work.
+ * @serial_wq: The workqueue used for serial work.
  * @pd: The internal control structure.
  * @cpumask: User supplied cpumasks for parallel and serial works.
  * @cpumask_change_notifier: Notifiers chain for user-defined notify
@@ -139,7 +140,8 @@ struct parallel_data {
  */
 struct padata_instance {
 	struct hlist_node		 node;
-	struct workqueue_struct		*wq;
+	struct workqueue_struct		*parallel_wq;
+	struct workqueue_struct		*serial_wq;
 	struct parallel_data		*pd;
 	struct padata_cpumask		cpumask;
 	struct blocking_notifier_head	 cpumask_change_notifier;
--- a/kernel/padata.c
+++ b/kernel/padata.c
@@ -152,7 +152,7 @@ int padata_do_parallel(struct padata_instance *pinst,
 	list_add_tail(&padata->list, &queue->parallel.list);
 	spin_unlock(&queue->parallel.lock);
-	queue_work_on(target_cpu, pinst->wq, &queue->work);
+	queue_work_on(target_cpu, pinst->parallel_wq, &queue->work);
 
 out:
 	rcu_read_unlock_bh();
@@ -261,7 +261,7 @@ static void padata_reorder(struct parallel_data *pd)
 			list_add_tail(&padata->list, &squeue->serial.list);
 			spin_unlock(&squeue->serial.lock);
-			queue_work_on(cb_cpu, pinst->wq, &squeue->work);
+			queue_work_on(cb_cpu, pinst->serial_wq, &squeue->work);
 		}
 	}
 	spin_unlock_bh(&pd->lock);
@@ -278,7 +278,7 @@ static void padata_reorder(struct parallel_data *pd)
 	next_queue = per_cpu_ptr(pd->pqueue, pd->cpu);
 	if (!list_empty(&next_queue->reorder.list))
-		queue_work(pinst->wq, &pd->reorder_work);
+		queue_work(pinst->serial_wq, &pd->reorder_work);
 }
 
 static void invoke_padata_reorder(struct work_struct *work)
@@ -818,7 +818,8 @@ static void __padata_free(struct padata_instance *pinst)
 	padata_free_pd(pinst->pd);
 	free_cpumask_var(pinst->cpumask.pcpu);
 	free_cpumask_var(pinst->cpumask.cbcpu);
-	destroy_workqueue(pinst->wq);
+	destroy_workqueue(pinst->serial_wq);
+	destroy_workqueue(pinst->parallel_wq);
 	kfree(pinst);
 }
@@ -967,18 +968,23 @@ static struct padata_instance *padata_alloc(const char *name,
 	if (!pinst)
 		goto err;
 
-	pinst->wq = alloc_workqueue("%s", WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE,
-				    1, name);
-	if (!pinst->wq)
+	pinst->parallel_wq = alloc_workqueue("%s_parallel", WQ_MEM_RECLAIM |
+					     WQ_CPU_INTENSIVE, 1, name);
+	if (!pinst->parallel_wq)
 		goto err_free_inst;
 
 	get_online_cpus();
 
-	if (!alloc_cpumask_var(&pinst->cpumask.pcpu, GFP_KERNEL))
+	pinst->serial_wq = alloc_workqueue("%s_serial", WQ_MEM_RECLAIM |
+					   WQ_CPU_INTENSIVE, 1, name);
+	if (!pinst->serial_wq)
 		goto err_put_cpus;
+
+	if (!alloc_cpumask_var(&pinst->cpumask.pcpu, GFP_KERNEL))
+		goto err_free_serial_wq;
 	if (!alloc_cpumask_var(&pinst->cpumask.cbcpu, GFP_KERNEL)) {
 		free_cpumask_var(pinst->cpumask.pcpu);
-		goto err_put_cpus;
+		goto err_free_serial_wq;
 	}
 
 	if (!padata_validate_cpumask(pinst, pcpumask) ||
 	    !padata_validate_cpumask(pinst, cbcpumask))
@@ -1010,9 +1016,11 @@ static struct padata_instance *padata_alloc(const char *name,
 err_free_masks:
 	free_cpumask_var(pinst->cpumask.pcpu);
 	free_cpumask_var(pinst->cpumask.cbcpu);
+err_free_serial_wq:
+	destroy_workqueue(pinst->serial_wq);
 err_put_cpus:
 	put_online_cpus();
-	destroy_workqueue(pinst->wq);
+	destroy_workqueue(pinst->parallel_wq);
 err_free_inst:
 	kfree(pinst);
 err: