Commit 00619f7c authored by Peter Zijlstra

sched,livepatch: Use task_call_func()

Instead of frobbing around with scheduler internals, use the shiny new
task_call_func() interface.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Petr Mladek <pmladek@suse.com>
Acked-by: Miroslav Benes <mbenes@suse.cz>
Acked-by: Vasily Gorbik <gor@linux.ibm.com>
Tested-by: Petr Mladek <pmladek@suse.com>
Tested-by: Vasily Gorbik <gor@linux.ibm.com> # on s390
Link: https://lkml.kernel.org/r/20210929152428.709906138@infradead.org
parent 9b3c4ab3
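For readers unfamiliar with the interface, a minimal sketch of the calling pattern the diff below adopts. The callback and caller names here are hypothetical, and the declaration assumed is the 5.16-era one from include/linux/wait.h: `typedef int (*task_call_f)(struct task_struct *p, void *arg);` and `int task_call_func(struct task_struct *p, task_call_f func, void *arg);`. The key property is that task_call_func() pins the task's state (taking pi_lock, and the rq lock if needed) around the callback, so the callback must not sleep and should communicate results via its return value and the opaque `arg`:

#include <linux/errno.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/wait.h>

/* Hypothetical callback: invoked by task_call_func() with the task's
 * state pinned, so it must stay atomic and must not sleep. */
static int demo_check_task(struct task_struct *p, void *arg)
{
	if (task_curr(p) && p != current)
		return -EBUSY;			/* running on some CPU */

	*(const char **)arg = p->comm;		/* report detail to the caller */
	return 0;
}

/* Hypothetical caller: decode the callback's return value only after
 * task_call_func() has dropped the locks, where pr_debug() is safe. */
static bool demo_try_task(struct task_struct *p)
{
	const char *name;
	int ret;

	ret = task_call_func(p, demo_check_task, &name);
	if (ret)
		pr_debug("%s: %s:%d not switched (%d)\n",
			 __func__, p->comm, p->pid, ret);
	return !ret;
}

This is exactly the shape of the klp_try_switch_task()/klp_check_and_switch_task() pair introduced below, including the trick of deferring pr_debug() until after the locks are released.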
--- a/kernel/livepatch/transition.c
+++ b/kernel/livepatch/transition.c
@@ -13,7 +13,6 @@
 #include "core.h"
 #include "patch.h"
 #include "transition.h"
-#include "../sched/sched.h"
 
 #define MAX_STACK_ENTRIES	100
 #define STACK_ERR_BUF_SIZE	128
@@ -240,7 +239,7 @@ static int klp_check_stack_func(struct klp_func *func, unsigned long *entries,
  * Determine whether it's safe to transition the task to the target patch state
  * by looking for any to-be-patched or to-be-unpatched functions on its stack.
  */
-static int klp_check_stack(struct task_struct *task, char *err_buf)
+static int klp_check_stack(struct task_struct *task, const char **oldname)
 {
 	static unsigned long entries[MAX_STACK_ENTRIES];
 	struct klp_object *obj;
@@ -248,12 +247,8 @@ static int klp_check_stack(struct task_struct *task, char *err_buf)
 	int ret, nr_entries;
 
 	ret = stack_trace_save_tsk_reliable(task, entries, ARRAY_SIZE(entries));
-	if (ret < 0) {
-		snprintf(err_buf, STACK_ERR_BUF_SIZE,
-			 "%s: %s:%d has an unreliable stack\n",
-			 __func__, task->comm, task->pid);
-		return ret;
-	}
+	if (ret < 0)
+		return -EINVAL;
 	nr_entries = ret;
 
 	klp_for_each_object(klp_transition_patch, obj) {
@@ -262,11 +257,8 @@ static int klp_check_stack(struct task_struct *task, char *err_buf)
 		klp_for_each_func(obj, func) {
 			ret = klp_check_stack_func(func, entries, nr_entries);
 			if (ret) {
-				snprintf(err_buf, STACK_ERR_BUF_SIZE,
-					 "%s: %s:%d is sleeping on function %s\n",
-					 __func__, task->comm, task->pid,
-					 func->old_name);
-				return ret;
+				*oldname = func->old_name;
+				return -EADDRINUSE;
 			}
 		}
 	}
@@ -274,6 +266,22 @@ static int klp_check_stack(struct task_struct *task, char *err_buf)
 	return 0;
 }
 
+static int klp_check_and_switch_task(struct task_struct *task, void *arg)
+{
+	int ret;
+
+	if (task_curr(task) && task != current)
+		return -EBUSY;
+
+	ret = klp_check_stack(task, arg);
+	if (ret)
+		return ret;
+
+	clear_tsk_thread_flag(task, TIF_PATCH_PENDING);
+	task->patch_state = klp_target_state;
+
+	return 0;
+}
+
 /*
  * Try to safely switch a task to the target patch state. If it's currently
  * running, or it's sleeping on a to-be-patched or to-be-unpatched function, or
@@ -281,13 +289,8 @@ static int klp_check_stack(struct task_struct *task, char *err_buf)
  */
 static bool klp_try_switch_task(struct task_struct *task)
 {
-	static char err_buf[STACK_ERR_BUF_SIZE];
-	struct rq *rq;
-	struct rq_flags flags;
+	const char *old_name;
 	int ret;
-	bool success = false;
-
-	err_buf[0] = '\0';
 
 	/* check if this task has already switched over */
 	if (task->patch_state == klp_target_state)
@@ -305,36 +308,31 @@ static bool klp_try_switch_task(struct task_struct *task)
 	 * functions. If all goes well, switch the task to the target patch
 	 * state.
 	 */
-	rq = task_rq_lock(task, &flags);
+	ret = task_call_func(task, klp_check_and_switch_task, &old_name);
+	switch (ret) {
+	case 0:		/* success */
+		break;
 
-	if (task_running(rq, task) && task != current) {
-		snprintf(err_buf, STACK_ERR_BUF_SIZE,
-			 "%s: %s:%d is running\n", __func__, task->comm,
-			 task->pid);
-		goto done;
+	case -EBUSY:	/* klp_check_and_switch_task() */
+		pr_debug("%s: %s:%d is running\n",
+			 __func__, task->comm, task->pid);
+		break;
+	case -EINVAL:	/* klp_check_and_switch_task() */
+		pr_debug("%s: %s:%d has an unreliable stack\n",
+			 __func__, task->comm, task->pid);
+		break;
+	case -EADDRINUSE: /* klp_check_and_switch_task() */
+		pr_debug("%s: %s:%d is sleeping on function %s\n",
+			 __func__, task->comm, task->pid, old_name);
+		break;
+	default:
+		pr_debug("%s: Unknown error code (%d) when trying to switch %s:%d\n",
+			 __func__, ret, task->comm, task->pid);
+		break;
 	}
 
-	ret = klp_check_stack(task, err_buf);
-	if (ret)
-		goto done;
-
-	success = true;
-
-	clear_tsk_thread_flag(task, TIF_PATCH_PENDING);
-	task->patch_state = klp_target_state;
-
-done:
-	task_rq_unlock(rq, task, &flags);
-
-	/*
-	 * Due to console deadlock issues, pr_debug() can't be used while
-	 * holding the task rq lock. Instead we have to use a temporary buffer
-	 * and print the debug message after releasing the lock.
-	 */
-	if (err_buf[0] != '\0')
-		pr_debug("%s", err_buf);
-
-	return success;
+	return !ret;
 }
 
 /*
......