Commit 137e88e6 authored by Mathias Krause, committed by Khalid Elmously

padata: ensure padata_do_serial() runs on the correct CPU

BugLink: https://bugs.launchpad.net/bugs/1883916

commit 350ef88e upstream.

If the algorithm we're parallelizing is asynchronous we might change
CPUs between padata_do_parallel() and padata_do_serial(). However, we
don't expect this to happen as we need to enqueue the padata object into
the per-cpu reorder queue we took it from, i.e. the same-cpu's parallel
queue.

Ensure we're not switching CPUs for a given padata object by tracking
the CPU within the padata object. If the serial callback gets called on
the wrong CPU, defer invoking padata_reorder() via a kernel worker on
the CPU we're expected to run on.
Signed-off-by: Mathias Krause <minipli@googlemail.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Cc: Ben Hutchings <ben@decadent.org.uk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Kamal Mostafa <kamal@canonical.com>
Signed-off-by: Khalid Elmously <khalid.elmously@canonical.com>
parent b423a88e
...@@ -37,6 +37,7 @@ ...@@ -37,6 +37,7 @@
* @list: List entry, to attach to the padata lists. * @list: List entry, to attach to the padata lists.
* @pd: Pointer to the internal control structure. * @pd: Pointer to the internal control structure.
* @cb_cpu: Callback cpu for serializatioon. * @cb_cpu: Callback cpu for serializatioon.
* @cpu: Cpu for parallelization.
* @seq_nr: Sequence number of the parallelized data object. * @seq_nr: Sequence number of the parallelized data object.
* @info: Used to pass information from the parallel to the serial function. * @info: Used to pass information from the parallel to the serial function.
* @parallel: Parallel execution function. * @parallel: Parallel execution function.
...@@ -46,6 +47,7 @@ struct padata_priv { ...@@ -46,6 +47,7 @@ struct padata_priv {
struct list_head list; struct list_head list;
struct parallel_data *pd; struct parallel_data *pd;
int cb_cpu; int cb_cpu;
int cpu;
int info; int info;
void (*parallel)(struct padata_priv *padata); void (*parallel)(struct padata_priv *padata);
void (*serial)(struct padata_priv *padata); void (*serial)(struct padata_priv *padata);
......
...@@ -132,6 +132,7 @@ int padata_do_parallel(struct padata_instance *pinst, ...@@ -132,6 +132,7 @@ int padata_do_parallel(struct padata_instance *pinst,
padata->cb_cpu = cb_cpu; padata->cb_cpu = cb_cpu;
target_cpu = padata_cpu_hash(pd); target_cpu = padata_cpu_hash(pd);
padata->cpu = target_cpu;
queue = per_cpu_ptr(pd->pqueue, target_cpu); queue = per_cpu_ptr(pd->pqueue, target_cpu);
spin_lock(&queue->parallel.lock); spin_lock(&queue->parallel.lock);
...@@ -375,10 +376,21 @@ void padata_do_serial(struct padata_priv *padata) ...@@ -375,10 +376,21 @@ void padata_do_serial(struct padata_priv *padata)
int cpu; int cpu;
struct padata_parallel_queue *pqueue; struct padata_parallel_queue *pqueue;
struct parallel_data *pd; struct parallel_data *pd;
int reorder_via_wq = 0;
pd = padata->pd; pd = padata->pd;
cpu = get_cpu(); cpu = get_cpu();
/* We need to run on the same CPU padata_do_parallel(.., padata, ..)
* was called on -- or, at least, enqueue the padata object into the
* correct per-cpu queue.
*/
if (cpu != padata->cpu) {
reorder_via_wq = 1;
cpu = padata->cpu;
}
pqueue = per_cpu_ptr(pd->pqueue, cpu); pqueue = per_cpu_ptr(pd->pqueue, cpu);
spin_lock(&pqueue->reorder.lock); spin_lock(&pqueue->reorder.lock);
...@@ -395,6 +407,12 @@ void padata_do_serial(struct padata_priv *padata) ...@@ -395,6 +407,12 @@ void padata_do_serial(struct padata_priv *padata)
put_cpu(); put_cpu();
/* If we're running on the wrong CPU, call padata_reorder() via a
* kernel worker.
*/
if (reorder_via_wq)
queue_work_on(cpu, pd->pinst->wq, &pqueue->reorder_work);
else
padata_reorder(pd); padata_reorder(pd);
} }
EXPORT_SYMBOL(padata_do_serial); EXPORT_SYMBOL(padata_do_serial);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment