Commit 71ce1dee authored by Nicolas Pitre

ARM: bL_switcher: move to dedicated threads rather than workqueues

The workqueues are problematic as they may be contended.
They can't be scheduled with top priority either.  Also the optimization
in bL_switch_request() to skip the workqueue entirely when the target CPU
and the calling CPU were the same didn't allow for bL_switch_request() to
be called from atomic context, as might be the case for some cpufreq
drivers.

Let's move to dedicated kthreads instead.
Signed-off-by: Nicolas Pitre <nico@linaro.org>
parent 3f09d479
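
Before the hunks themselves, here is a condensed, self-contained sketch of the pattern this patch adopts: one CPU-bound kthread per logical CPU, given real-time priority and parked on a wait queue until a request is posted with a plain wake_up(). It uses the kernel interfaces as they existed around the time of this patch; the names (switcher_worker, do_switch, threads[], request_switch) are invented for illustration, and the real bL_switch_to() machinery, signal handling and error reporting are omitted. The actual code follows in the diff.

#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/topology.h>
#include <linux/atomic.h>
#include <linux/smp.h>
#include <linux/printk.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/threads.h>
#include <linux/init.h>

struct switcher_thread {
	struct task_struct *task;	/* per-CPU kthread, or ERR_PTR()/NULL */
	wait_queue_head_t wq;		/* the thread sleeps here between requests */
	int wanted;			/* pending request; -1 means "none" */
};

static struct switcher_thread threads[NR_CPUS];

/* Stand-in for the real cluster switch performed by bL_switch_to(). */
static void do_switch(int cluster)
{
	pr_info("CPU%d: switching to cluster %d\n", smp_processor_id(), cluster);
}

/* Per-CPU worker: sleep until a request arrives, claim it, act on it. */
static int switcher_worker(void *arg)
{
	struct switcher_thread *t = arg;
	struct sched_param param = { .sched_priority = 1 };
	int req;

	/* Real-time priority, which a workqueue worker cannot be given. */
	sched_setscheduler_nocheck(current, SCHED_FIFO, &param);

	do {
		wait_event_interruptible(t->wq,
				t->wanted != -1 || kthread_should_stop());
		req = xchg(&t->wanted, -1);	/* atomically claim the request */
		if (req != -1)
			do_switch(req);
	} while (!kthread_should_stop());

	return 0;
}

/* Requests only poke the thread, so this is callable from atomic context. */
static int request_switch(unsigned int cpu, int cluster)
{
	struct switcher_thread *t = &threads[cpu];

	if (!t->task || IS_ERR(t->task))
		return -ESRCH;
	t->wanted = cluster;
	wake_up(&t->wq);
	return 0;
}

/* Create one worker per online CPU and bind it to that CPU. */
static int __init switcher_threads_init(void)
{
	int cpu;

	for_each_online_cpu(cpu) {
		struct switcher_thread *t = &threads[cpu];

		init_waitqueue_head(&t->wq);
		t->wanted = -1;
		t->task = kthread_create_on_node(switcher_worker, t,
						 cpu_to_node(cpu),
						 "switcher/%d", cpu);
		if (!IS_ERR(t->task)) {
			kthread_bind(t->task, cpu);
			wake_up_process(t->task);
		}
	}
	return 0;
}
late_initcall(switcher_threads_init);
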
--- a/arch/arm/common/bL_switcher.c
+++ b/arch/arm/common/bL_switcher.c
@@ -15,8 +15,10 @@
 #include <linux/sched.h>
 #include <linux/interrupt.h>
 #include <linux/cpu_pm.h>
+#include <linux/cpu.h>
 #include <linux/cpumask.h>
-#include <linux/workqueue.h>
+#include <linux/kthread.h>
+#include <linux/wait.h>
 #include <linux/clockchips.h>
 #include <linux/hrtimer.h>
 #include <linux/tick.h>
@@ -219,15 +221,48 @@ static int bL_switch_to(unsigned int new_cluster_id)
 	return ret;
 }
 
-struct switch_args {
-	unsigned int cluster;
-	struct work_struct work;
-};
-
-static void __bL_switch_to(struct work_struct *work)
-{
-	struct switch_args *args = container_of(work, struct switch_args, work);
-	bL_switch_to(args->cluster);
-}
-
+struct bL_thread {
+	struct task_struct *task;
+	wait_queue_head_t wq;
+	int wanted_cluster;
+};
+
+static struct bL_thread bL_threads[NR_CPUS];
+
+static int bL_switcher_thread(void *arg)
+{
+	struct bL_thread *t = arg;
+	struct sched_param param = { .sched_priority = 1 };
+	int cluster;
+
+	sched_setscheduler_nocheck(current, SCHED_FIFO, &param);
+
+	do {
+		if (signal_pending(current))
+			flush_signals(current);
+		wait_event_interruptible(t->wq,
+				t->wanted_cluster != -1 ||
+				kthread_should_stop());
+		cluster = xchg(&t->wanted_cluster, -1);
+		if (cluster != -1)
+			bL_switch_to(cluster);
+	} while (!kthread_should_stop());
+
+	return 0;
+}
+
+static struct task_struct * __init bL_switcher_thread_create(int cpu, void *arg)
+{
+	struct task_struct *task;
+
+	task = kthread_create_on_node(bL_switcher_thread, arg,
+				      cpu_to_node(cpu), "kswitcher_%d", cpu);
+	if (!IS_ERR(task)) {
+		kthread_bind(task, cpu);
+		wake_up_process(task);
+	} else
+		pr_err("%s failed for CPU %d\n", __func__, cpu);
+	return task;
+}
+
 /*
@@ -236,26 +271,46 @@ static void __bL_switch_to(struct work_struct *work)
  * @cpu: the CPU to switch
  * @new_cluster_id: the ID of the cluster to switch to.
  *
- * This function causes a cluster switch on the given CPU. If the given
- * CPU is the same as the calling CPU then the switch happens right away.
- * Otherwise the request is put on a work queue to be scheduled on the
- * remote CPU.
+ * This function causes a cluster switch on the given CPU by waking up
+ * the appropriate switcher thread. This function may or may not return
+ * before the switch has occurred.
  */
-void bL_switch_request(unsigned int cpu, unsigned int new_cluster_id)
+int bL_switch_request(unsigned int cpu, unsigned int new_cluster_id)
 {
-	unsigned int this_cpu = get_cpu();
-	struct switch_args args;
-
-	if (cpu == this_cpu) {
-		bL_switch_to(new_cluster_id);
-		put_cpu();
-		return;
-	}
-	put_cpu();
-
-	args.cluster = new_cluster_id;
-	INIT_WORK_ONSTACK(&args.work, __bL_switch_to);
-	schedule_work_on(cpu, &args.work);
-	flush_work(&args.work);
+	struct bL_thread *t;
+
+	if (cpu >= ARRAY_SIZE(bL_threads)) {
+		pr_err("%s: cpu %d out of bounds\n", __func__, cpu);
+		return -EINVAL;
+	}
+
+	t = &bL_threads[cpu];
+	if (IS_ERR(t->task))
+		return PTR_ERR(t->task);
+	if (!t->task)
+		return -ESRCH;
+
+	t->wanted_cluster = new_cluster_id;
+	wake_up(&t->wq);
+	return 0;
 }
 EXPORT_SYMBOL_GPL(bL_switch_request);
+
+static int __init bL_switcher_init(void)
+{
+	int cpu;
+
+	pr_info("big.LITTLE switcher initializing\n");
+
+	for_each_online_cpu(cpu) {
+		struct bL_thread *t = &bL_threads[cpu];
+		init_waitqueue_head(&t->wq);
+		t->wanted_cluster = -1;
+		t->task = bL_switcher_thread_create(cpu, t);
+	}
+
+	pr_info("big.LITTLE switcher initialized\n");
+	return 0;
+}
+
+late_initcall(bL_switcher_init);
--- a/arch/arm/include/asm/bL_switcher.h
+++ b/arch/arm/include/asm/bL_switcher.h
@@ -12,6 +12,6 @@
 #ifndef ASM_BL_SWITCHER_H
 #define ASM_BL_SWITCHER_H
 
-void bL_switch_request(unsigned int cpu, unsigned int new_cluster_id);
+int bL_switch_request(unsigned int cpu, unsigned int new_cluster_id);
 
 #endif
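
For callers, the practical difference is that bL_switch_request() no longer sleeps: it only records the wanted cluster and wakes the CPU-bound switcher thread, so it can be invoked where blocking is not allowed (for instance from a cpufreq driver's frequency-transition path), and it now returns an error code instead of void. A hypothetical caller might look like the sketch below; the function name and the 0/1 cluster numbering are assumptions made for illustration, not part of the patch.

#include <linux/printk.h>
#include <linux/types.h>
#include <asm/bL_switcher.h>

/* Hypothetical policy hook: pick the big or LITTLE cluster for @cpu. */
static int example_set_perf_level(unsigned int cpu, bool want_big)
{
	int ret;

	/*
	 * bL_switch_request() only sets wanted_cluster and does a
	 * wake_up() on the switcher thread, so it is usable where
	 * sleeping is not allowed.  The switch itself completes
	 * asynchronously on the target CPU.
	 */
	ret = bL_switch_request(cpu, want_big ? 0 : 1);
	if (ret)
		pr_err("cluster switch request for CPU %u failed: %d\n",
		       cpu, ret);
	return ret;
}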