Commit a58653cc authored by Linus Torvalds

Merge branch 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull scheduler fixes from Ingo Molnar:
 "Two fixes: a crash fix for an ARM SoC platform, and kernel-doc
  warnings fixes"

* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched/rt: Do not pull from current CPU if only one CPU to pull
  sched/core: Fix kernel-doc warnings after code movement
parents 3fba3614 f73c52a5
kernel/sched/core.c
@@ -5097,17 +5097,6 @@ SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
 	return ret;
 }
 
-/**
- * sys_sched_rr_get_interval - return the default timeslice of a process.
- * @pid: pid of the process.
- * @interval: userspace pointer to the timeslice value.
- *
- * this syscall writes the default timeslice value of a given process
- * into the user-space timespec buffer. A value of '0' means infinity.
- *
- * Return: On success, 0 and the timeslice is in @interval. Otherwise,
- * an error code.
- */
 static int sched_rr_get_interval(pid_t pid, struct timespec64 *t)
 {
 	struct task_struct *p;
@@ -5144,6 +5133,17 @@ static int sched_rr_get_interval(pid_t pid, struct timespec64 *t)
 	return retval;
 }
 
+/**
+ * sys_sched_rr_get_interval - return the default timeslice of a process.
+ * @pid: pid of the process.
+ * @interval: userspace pointer to the timeslice value.
+ *
+ * this syscall writes the default timeslice value of a given process
+ * into the user-space timespec buffer. A value of '0' means infinity.
+ *
+ * Return: On success, 0 and the timeslice is in @interval. Otherwise,
+ * an error code.
+ */
 SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
 		struct timespec __user *, interval)
 {
...
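For context (not part of the diff): the kernel-doc block moved above documents the raw syscall, which userspace normally reaches through the POSIX sched_rr_get_interval(2) wrapper. A minimal usage sketch, assuming the standard declaration in <sched.h>:

#include <sched.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
	struct timespec ts;

	/* pid 0 queries the calling thread's SCHED_RR timeslice. */
	if (sched_rr_get_interval(0, &ts) != 0) {
		perror("sched_rr_get_interval");
		return 1;
	}
	/* Per the kernel-doc above, a value of 0 means an infinite timeslice. */
	printf("timeslice: %ld.%09ld s\n", (long)ts.tv_sec, ts.tv_nsec);
	return 0;
}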
kernel/sched/rt.c
@@ -2034,8 +2034,9 @@ static void pull_rt_task(struct rq *this_rq)
 	bool resched = false;
 	struct task_struct *p;
 	struct rq *src_rq;
+	int rt_overload_count = rt_overloaded(this_rq);
 
-	if (likely(!rt_overloaded(this_rq)))
+	if (likely(!rt_overload_count))
 		return;
 
 	/*
@@ -2044,6 +2045,11 @@ static void pull_rt_task(struct rq *this_rq)
 	 */
 	smp_rmb();
 
+	/* If we are the only overloaded CPU do nothing */
+	if (rt_overload_count == 1 &&
+	    cpumask_test_cpu(this_rq->cpu, this_rq->rd->rto_mask))
+		return;
+
#ifdef HAVE_RT_PUSH_IPI
 	if (sched_feat(RT_PUSH_IPI)) {
 		tell_cpu_to_push(this_rq);
...
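The rt.c change reads the RT-overload count once and adds an early return when this CPU is the only RT-overloaded CPU, since there is then no other runqueue worth pulling from (this is the ARM SoC crash fix mentioned in the pull request message). An illustrative model of that decision, using hypothetical helper names and no kernel dependencies, just to show the logic:

#include <stdbool.h>

/* Sketch only: mirrors the new checks in pull_rt_task(), not kernel code. */
static bool worth_trying_pull(int rt_overload_count, bool this_cpu_overloaded)
{
	if (rt_overload_count == 0)
		return false;	/* no CPU is RT-overloaded */
	if (rt_overload_count == 1 && this_cpu_overloaded)
		return false;	/* we are the only overloaded CPU: nothing to pull */
	return true;		/* at least one other CPU may have a task to pull */
}

Capturing rt_overloaded() in rt_overload_count also means both checks work from the same snapshot of the counter instead of reading it twice.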