Commit f74370b8 authored by Elena Reshetova, committed by Linus Torvalds

ipc: convert sem_undo_list.refcnt from atomic_t to refcount_t

refcount_t type and corresponding API should be used instead of atomic_t
when the variable is used as a reference counter.  This allows to avoid
accidental refcounter overflows that might lead to use-after-free
situations.

Link: http://lkml.kernel.org/r/1499417992-3238-3-git-send-email-elena.reshetova@intel.com
Signed-off-by: Elena Reshetova <elena.reshetova@intel.com>
Signed-off-by: Hans Liljestrand <ishkamiel@gmail.com>
Signed-off-by: Kees Cook <keescook@chromium.org>
Signed-off-by: David Windsor <dwindsor@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: "Eric W. Biederman" <ebiederm@xmission.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Alexey Dobriyan <adobriyan@gmail.com>
Cc: Serge Hallyn <serge@hallyn.com>
Cc: <arozansk@redhat.com>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: Manfred Spraul <manfred@colorfullife.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent a2e0602c
@@ -122,7 +122,7 @@ struct sem_undo {
  * that may be shared among all a CLONE_SYSVSEM task group.
  */
 struct sem_undo_list {
-	atomic_t refcnt;
+	refcount_t refcnt;
 	spinlock_t lock;
 	struct list_head list_proc;
 };
@@ -1642,7 +1642,7 @@ static inline int get_undo_list(struct sem_undo_list **undo_listp)
 	if (undo_list == NULL)
 		return -ENOMEM;
 	spin_lock_init(&undo_list->lock);
-	atomic_set(&undo_list->refcnt, 1);
+	refcount_set(&undo_list->refcnt, 1);
 	INIT_LIST_HEAD(&undo_list->list_proc);
 	current->sysvsem.undo_list = undo_list;
@@ -2041,7 +2041,7 @@ int copy_semundo(unsigned long clone_flags, struct task_struct *tsk)
 		error = get_undo_list(&undo_list);
 		if (error)
 			return error;
-		atomic_inc(&undo_list->refcnt);
+		refcount_inc(&undo_list->refcnt);
 		tsk->sysvsem.undo_list = undo_list;
 	} else
 		tsk->sysvsem.undo_list = NULL;
@@ -2070,7 +2070,7 @@ void exit_sem(struct task_struct *tsk)
 		return;
 	tsk->sysvsem.undo_list = NULL;
-	if (!atomic_dec_and_test(&ulp->refcnt))
+	if (!refcount_dec_and_test(&ulp->refcnt))
 		return;
 	for (;;) {
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment