Commit e64ab2db authored by Linus Torvalds

watch_queue: Fix missing locking in add_watch_to_object()

If a watch is being added to a queue, it needs to guard against
interference from addition of a new watch, manual removal of a watch and
removal of a watch due to some other queue being destroyed.

KEYCTL_WATCH_KEY guards against this for the same {key,queue} pair by
holding the key->sem writelocked and by holding refs on both the key and
the queue - but that doesn't prevent interaction from other {key,queue}
pairs.

While add_watch_to_object() does take the spinlock on the event queue,
it doesn't take the lock on the source's watch list.  The assumption was
that the caller would prevent that (say by taking key->sem) - but that
doesn't prevent interference from the destruction of another queue.

Fix this by locking the watcher list in add_watch_to_object().

Fixes: c73be61c ("pipe: Add general notification queue support")
Reported-by: syzbot+03d7b43290037d1f87ca@syzkaller.appspotmail.com
Signed-off-by: David Howells <dhowells@redhat.com>
cc: keyrings@vger.kernel.org
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent e0339f03
...@@ -454,6 +454,33 @@ void init_watch(struct watch *watch, struct watch_queue *wqueue) ...@@ -454,6 +454,33 @@ void init_watch(struct watch *watch, struct watch_queue *wqueue)
rcu_assign_pointer(watch->queue, wqueue); rcu_assign_pointer(watch->queue, wqueue);
} }
/*
 * Add a watch to a source's watch list and to its queue's watch list.
 *
 * Called by add_watch_to_object() with the queue locked (lock_wqueue())
 * and wlist->lock held, which excludes concurrent addition/removal of
 * watches on either list.
 *
 * Returns 0 on success, -EBUSY if an identical {queue, id} watch already
 * exists on the list, or -EAGAIN if the user's watch quota is exhausted.
 */
static int add_one_watch(struct watch *watch, struct watch_list *wlist,
			 struct watch_queue *wqueue)
{
	const struct cred *cred;
	struct watch *w;

	/* Refuse to add a duplicate {queue, id} pair. */
	hlist_for_each_entry(w, &wlist->watchers, list_node) {
		struct watch_queue *wq = rcu_access_pointer(w->queue);
		if (wqueue == wq && watch->id == w->id)
			return -EBUSY;
	}

	/* Charge the watch against the current user; RLIMIT_NOFILE is
	 * reused as the cap on the number of outstanding watches.  Undo
	 * the increment if over the limit.
	 */
	cred = current_cred();
	if (atomic_inc_return(&cred->user->nr_watches) > task_rlimit(current, RLIMIT_NOFILE)) {
		atomic_dec(&cred->user->nr_watches);
		return -EAGAIN;
	}

	watch->cred = get_cred(cred);
	rcu_assign_pointer(watch->watch_list, wlist);

	/* Take a ref on each side for the two list memberships below. */
	kref_get(&wqueue->usage);
	kref_get(&watch->usage);
	hlist_add_head(&watch->queue_node, &wqueue->watches);
	hlist_add_head_rcu(&watch->list_node, &wlist->watchers);
	return 0;
}
/** /**
* add_watch_to_object - Add a watch on an object to a watch list * add_watch_to_object - Add a watch on an object to a watch list
* @watch: The watch to add * @watch: The watch to add
...@@ -468,34 +495,21 @@ void init_watch(struct watch *watch, struct watch_queue *wqueue) ...@@ -468,34 +495,21 @@ void init_watch(struct watch *watch, struct watch_queue *wqueue)
*/ */
int add_watch_to_object(struct watch *watch, struct watch_list *wlist) int add_watch_to_object(struct watch *watch, struct watch_list *wlist)
{ {
struct watch_queue *wqueue = rcu_access_pointer(watch->queue); struct watch_queue *wqueue;
struct watch *w; int ret = -ENOENT;
hlist_for_each_entry(w, &wlist->watchers, list_node) {
struct watch_queue *wq = rcu_access_pointer(w->queue);
if (wqueue == wq && watch->id == w->id)
return -EBUSY;
}
watch->cred = get_current_cred();
rcu_assign_pointer(watch->watch_list, wlist);
if (atomic_inc_return(&watch->cred->user->nr_watches) > rcu_read_lock();
task_rlimit(current, RLIMIT_NOFILE)) {
atomic_dec(&watch->cred->user->nr_watches);
put_cred(watch->cred);
return -EAGAIN;
}
wqueue = rcu_access_pointer(watch->queue);
if (lock_wqueue(wqueue)) { if (lock_wqueue(wqueue)) {
kref_get(&wqueue->usage); spin_lock(&wlist->lock);
kref_get(&watch->usage); ret = add_one_watch(watch, wlist, wqueue);
hlist_add_head(&watch->queue_node, &wqueue->watches); spin_unlock(&wlist->lock);
unlock_wqueue(wqueue); unlock_wqueue(wqueue);
} }
hlist_add_head_rcu(&watch->list_node, &wlist->watchers); rcu_read_unlock();
return 0; return ret;
} }
EXPORT_SYMBOL(add_watch_to_object); EXPORT_SYMBOL(add_watch_to_object);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment