Commit f8d570a4 authored by David Miller, committed by Linus Torvalds

net: Fix recursive descent in __scm_destroy().

__scm_destroy() walks the list of file descriptors in the scm_fp_list
pointed to by the scm_cookie argument.

Those, in turn, can close sockets and invoke __scm_destroy() again.

There is nothing which limits how deeply this can occur.
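
As an illustration only (this is not part of the patch; the socket type,
chain length and exact kernel-side teardown behaviour here are
assumptions), a userspace sketch of how such a chain can be built: each
AF_UNIX socketpair parks the previous pair's descriptor in its receive
queue via SCM_RIGHTS, so dropping the last descriptor asks the kernel to
unwind every link.

#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <unistd.h>

/* Pass fd_to_pass over 'via'; it is queued on the peer's receive queue. */
static int send_fd(int via, int fd_to_pass)
{
	char byte = 0;
	struct iovec iov = { .iov_base = &byte, .iov_len = 1 };
	union {
		struct cmsghdr hdr;
		char buf[CMSG_SPACE(sizeof(int))];
	} u;
	struct msghdr msg = { 0 };
	struct cmsghdr *cmsg;

	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;
	msg.msg_control = u.buf;
	msg.msg_controllen = sizeof(u.buf);

	cmsg = CMSG_FIRSTHDR(&msg);
	cmsg->cmsg_level = SOL_SOCKET;
	cmsg->cmsg_type = SCM_RIGHTS;
	cmsg->cmsg_len = CMSG_LEN(sizeof(int));
	memcpy(CMSG_DATA(cmsg), &fd_to_pass, sizeof(int));

	return (int)sendmsg(via, &msg, 0);
}

int main(void)
{
	int keep = -1;	/* socket whose receive queue holds the previous link */
	int i;

	for (i = 0; i < 5000; i++) {
		int sv[2];

		if (socketpair(AF_UNIX, SOCK_DGRAM, 0, sv) < 0)
			break;
		if (keep >= 0) {
			send_fd(sv[1], keep);	/* lands in sv[0]'s queue */
			close(keep);
		}
		close(sv[1]);
		keep = sv[0];
	}
	/* Dropping the last userspace reference starts the chained teardown. */
	close(keep);
	return 0;
}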

The idea for how to fix this is from Linus.  Basically, we do all of
the fput()s at the top level by collecting all of the scm_fp_list
objects hit by an fput().  Inside of the initial __scm_destroy() we
keep running the list until it is empty.
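
A minimal sketch of that pattern outside the kernel (hypothetical names:
node, destroy_node and release_child stand in for scm_fp_list,
__scm_destroy and fput; the patch itself anchors the work list in a
LIST_HEAD on the outermost caller's stack and publishes it through
current->scm_work_list):

#include <stdlib.h>

struct node {
	struct node *next;	/* links pending cleanup work */
	struct node *child;	/* releasing this node queues more work */
};

/* Plays the role of current->scm_work_list: non-NULL only while the
 * outermost destroy_node() call is draining its work list. */
static __thread struct node **work_list;

static void destroy_node(struct node *n);

/* Stand-in for fput(): dropping the reference triggers another destroy. */
static void release_child(struct node *child)
{
	if (child)
		destroy_node(child);
}

static void destroy_node(struct node *n)
{
	struct node *pending;

	if (work_list) {
		/* Nested call: park the node on the outer call's list. */
		n->next = *work_list;
		*work_list = n;
		return;
	}

	/* Outermost call: publish the list, then drain it iteratively. */
	pending = NULL;
	work_list = &pending;
	n->next = NULL;
	pending = n;

	while (pending) {
		struct node *cur = pending;

		pending = cur->next;
		release_child(cur->child);	/* may re-enter destroy_node() */
		free(cur);
	}
	work_list = NULL;
}

int main(void)
{
	/* Build a long chain; tearing it down stays at constant stack depth. */
	struct node *head = NULL;
	int i;

	for (i = 0; i < 100000; i++) {
		struct node *n = calloc(1, sizeof(*n));

		if (!n)
			break;
		n->child = head;
		head = n;
	}
	if (head)
		destroy_node(head);
	return 0;
}

Because the per-thread pointer stays set for the whole drain loop, any
destroy triggered from release_child() only appends to the pending list
and returns, so the call depth stays bounded no matter how long the
chain is.
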
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 75fa6770
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1349,6 +1349,8 @@ struct task_struct {
 	 */
 	unsigned long timer_slack_ns;
 	unsigned long default_timer_slack_ns;
+
+	struct list_head *scm_work_list;
 };
 
 /*
--- a/include/net/scm.h
+++ b/include/net/scm.h
@@ -14,6 +14,7 @@
 struct scm_fp_list
 {
+	struct list_head list;
 	int count;
 	struct file *fp[SCM_MAX_FD];
 };
--- a/net/core/scm.c
+++ b/net/core/scm.c
@@ -75,6 +75,7 @@ static int scm_fp_copy(struct cmsghdr *cmsg, struct scm_fp_list **fplp)
 		if (!fpl)
 			return -ENOMEM;
 		*fplp = fpl;
+		INIT_LIST_HEAD(&fpl->list);
 		fpl->count = 0;
 	}
 	fpp = &fpl->fp[fpl->count];
@@ -106,10 +107,26 @@ void __scm_destroy(struct scm_cookie *scm)
 
 	if (fpl) {
 		scm->fp = NULL;
-		for (i=fpl->count-1; i>=0; i--)
-			fput(fpl->fp[i]);
-		kfree(fpl);
+		if (current->scm_work_list) {
+			list_add_tail(&fpl->list, current->scm_work_list);
+		} else {
+			LIST_HEAD(work_list);
+
+			current->scm_work_list = &work_list;
+
+			list_add(&fpl->list, &work_list);
+			while (!list_empty(&work_list)) {
+				fpl = list_first_entry(&work_list, struct scm_fp_list, list);
+
+				list_del(&fpl->list);
+				for (i=fpl->count-1; i>=0; i--)
+					fput(fpl->fp[i]);
+				kfree(fpl);
+			}
+
+			current->scm_work_list = NULL;
+		}
 	}
 }
 
 int __scm_send(struct socket *sock, struct msghdr *msg, struct scm_cookie *p)
@@ -284,6 +301,7 @@ struct scm_fp_list *scm_fp_dup(struct scm_fp_list *fpl)
 
 	new_fpl = kmalloc(sizeof(*fpl), GFP_KERNEL);
 	if (new_fpl) {
+		INIT_LIST_HEAD(&new_fpl->list);
 		for (i=fpl->count-1; i>=0; i--)
 			get_file(fpl->fp[i]);
 		memcpy(new_fpl, fpl, sizeof(*fpl));