Commit 219f170a authored by Linus Torvalds's avatar Linus Torvalds

Merge branch 'sched-fixes-for-linus' of...

Merge branch 'sched-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'sched-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  sched: don't allow setuid to succeed if the user does not have rt bandwidth
  sched_rt: don't start timer when rt bandwidth disabled
parents 3024e4a9 54e99124
...@@ -2291,9 +2291,13 @@ extern long sched_group_rt_runtime(struct task_group *tg); ...@@ -2291,9 +2291,13 @@ extern long sched_group_rt_runtime(struct task_group *tg);
extern int sched_group_set_rt_period(struct task_group *tg, extern int sched_group_set_rt_period(struct task_group *tg,
long rt_period_us); long rt_period_us);
extern long sched_group_rt_period(struct task_group *tg); extern long sched_group_rt_period(struct task_group *tg);
extern int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk);
#endif #endif
#endif #endif
extern int task_can_switch_user(struct user_struct *up,
struct task_struct *tsk);
#ifdef CONFIG_TASK_XACCT #ifdef CONFIG_TASK_XACCT
static inline void add_rchar(struct task_struct *tsk, ssize_t amt) static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
{ {
......
...@@ -223,7 +223,7 @@ static void start_rt_bandwidth(struct rt_bandwidth *rt_b) ...@@ -223,7 +223,7 @@ static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
{ {
ktime_t now; ktime_t now;
if (rt_bandwidth_enabled() && rt_b->rt_runtime == RUNTIME_INF) if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
return; return;
if (hrtimer_active(&rt_b->rt_period_timer)) if (hrtimer_active(&rt_b->rt_period_timer))
...@@ -9224,6 +9224,16 @@ static int sched_rt_global_constraints(void) ...@@ -9224,6 +9224,16 @@ static int sched_rt_global_constraints(void)
return ret; return ret;
} }
/*
 * Decide whether @tsk may be attached to task group @tg.  A realtime
 * task is refused when the group's RT bandwidth budget is zero, since
 * it would never be allowed to run there.
 */
int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk)
{
	if (!rt_task(tsk))
		return 1;
	/* RT task: only admit it if the group has runtime to give it */
	return tg->rt_bandwidth.rt_runtime != 0;
}
#else /* !CONFIG_RT_GROUP_SCHED */ #else /* !CONFIG_RT_GROUP_SCHED */
static int sched_rt_global_constraints(void) static int sched_rt_global_constraints(void)
{ {
...@@ -9317,8 +9327,7 @@ cpu_cgroup_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp, ...@@ -9317,8 +9327,7 @@ cpu_cgroup_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
struct task_struct *tsk) struct task_struct *tsk)
{ {
#ifdef CONFIG_RT_GROUP_SCHED #ifdef CONFIG_RT_GROUP_SCHED
/* Don't accept realtime tasks when there is no way for them to run */ if (!sched_rt_can_attach(cgroup_tg(cgrp), tsk))
if (rt_task(tsk) && cgroup_tg(cgrp)->rt_bandwidth.rt_runtime == 0)
return -EINVAL; return -EINVAL;
#else #else
/* We don't support RT-tasks being in separate groups */ /* We don't support RT-tasks being in separate groups */
......
...@@ -559,7 +559,7 @@ SYSCALL_DEFINE1(setgid, gid_t, gid) ...@@ -559,7 +559,7 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
abort_creds(new); abort_creds(new);
return retval; return retval;
} }
/* /*
* change the user struct in a credentials set to match the new UID * change the user struct in a credentials set to match the new UID
*/ */
...@@ -571,6 +571,11 @@ static int set_user(struct cred *new) ...@@ -571,6 +571,11 @@ static int set_user(struct cred *new)
if (!new_user) if (!new_user)
return -EAGAIN; return -EAGAIN;
if (!task_can_switch_user(new_user, current)) {
free_uid(new_user);
return -EINVAL;
}
if (atomic_read(&new_user->processes) >= if (atomic_read(&new_user->processes) >=
current->signal->rlim[RLIMIT_NPROC].rlim_cur && current->signal->rlim[RLIMIT_NPROC].rlim_cur &&
new_user != INIT_USER) { new_user != INIT_USER) {
...@@ -631,10 +636,11 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid) ...@@ -631,10 +636,11 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
goto error; goto error;
} }
retval = -EAGAIN; if (new->uid != old->uid) {
if (new->uid != old->uid && set_user(new) < 0) retval = set_user(new);
goto error; if (retval < 0)
goto error;
}
if (ruid != (uid_t) -1 || if (ruid != (uid_t) -1 ||
(euid != (uid_t) -1 && euid != old->uid)) (euid != (uid_t) -1 && euid != old->uid))
new->suid = new->euid; new->suid = new->euid;
...@@ -680,9 +686,10 @@ SYSCALL_DEFINE1(setuid, uid_t, uid) ...@@ -680,9 +686,10 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
retval = -EPERM; retval = -EPERM;
if (capable(CAP_SETUID)) { if (capable(CAP_SETUID)) {
new->suid = new->uid = uid; new->suid = new->uid = uid;
if (uid != old->uid && set_user(new) < 0) { if (uid != old->uid) {
retval = -EAGAIN; retval = set_user(new);
goto error; if (retval < 0)
goto error;
} }
} else if (uid != old->uid && uid != new->suid) { } else if (uid != old->uid && uid != new->suid) {
goto error; goto error;
...@@ -734,11 +741,13 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid) ...@@ -734,11 +741,13 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
goto error; goto error;
} }
retval = -EAGAIN;
if (ruid != (uid_t) -1) { if (ruid != (uid_t) -1) {
new->uid = ruid; new->uid = ruid;
if (ruid != old->uid && set_user(new) < 0) if (ruid != old->uid) {
goto error; retval = set_user(new);
if (retval < 0)
goto error;
}
} }
if (euid != (uid_t) -1) if (euid != (uid_t) -1)
new->euid = euid; new->euid = euid;
......
...@@ -362,6 +362,24 @@ static void free_user(struct user_struct *up, unsigned long flags) ...@@ -362,6 +362,24 @@ static void free_user(struct user_struct *up, unsigned long flags)
#endif #endif
#if defined(CONFIG_RT_GROUP_SCHED) && defined(CONFIG_USER_SCHED)
/*
 * We need to check if a setuid can take place. This function should be called
 * before successfully completing the setuid.
 */
int task_can_switch_user(struct user_struct *up, struct task_struct *tsk)
{
	/* Defer to the scheduler: refuse if tsk could never run in up's group */
	return sched_rt_can_attach(up->tg, tsk);
}
#else
/*
 * Without per-user RT group scheduling there is no bandwidth constraint
 * tied to the target user, so the switch is always permitted.
 */
int task_can_switch_user(struct user_struct *up, struct task_struct *tsk)
{
	return 1;
}
#endif
/* /*
* Locate the user_struct for the passed UID. If found, take a ref on it. The * Locate the user_struct for the passed UID. If found, take a ref on it. The
* caller must undo that ref with free_uid(). * caller must undo that ref with free_uid().
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment