Commit d677bb3e authored by Sakari Ailus, committed by Kleber Sacilotto de Souza

media: v4l: event: Prevent freeing event subscriptions while accessed

BugLink: https://bugs.launchpad.net/bugs/1798770

commit ad608fbc upstream.

The event subscriptions are added to the subscribed event list while
holding a spinlock, but that lock is subsequently released while still
accessing the subscription object. This makes it possible to unsubscribe
the event, and thereby free the subscription object's memory, while the
subscription object is still being accessed.
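
To illustrate (a rough sketch of the interleaving, not the exact code; thread
labels are invented), with thread A subscribing and thread B unsubscribing the
same event:

	A: spin_lock(fh_lock); list_add(&sev->list, &fh->subscribed); spin_unlock(fh_lock);
	B: list_del(&sev->list); sev->ops->del(sev); kfree(sev);   /* VIDIOC_UNSUBSCRIBE_EVENT */
	A: sev->ops->add(sev, elems);                              /* use-after-free */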

Prevent this by adding a mutex to serialise the event subscription and
unsubscription. This also gives a guarantee to the callback ops that the
add op has returned before the del op is called.

This change also makes the elems field less special: subscriptions are added
to the subscribed event list only once they are fully initialised.
Signed-off-by: Sakari Ailus <sakari.ailus@linux.intel.com>
Reviewed-by: Hans Verkuil <hans.verkuil@cisco.com>
Reviewed-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
Cc: stable@vger.kernel.org # for 4.14 and up
Fixes: c3b5b024 ("V4L/DVB: V4L: Events: Add backend")
Signed-off-by: Mauro Carvalho Chehab <mchehab+samsung@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Stefan Bader <stefan.bader@canonical.com>
Signed-off-by: Kleber Sacilotto de Souza <kleber.souza@canonical.com>
parent 0a2fc306
--- a/drivers/media/v4l2-core/v4l2-event.c
+++ b/drivers/media/v4l2-core/v4l2-event.c
@@ -119,14 +119,6 @@ static void __v4l2_event_queue_fh(struct v4l2_fh *fh, const struct v4l2_event *e
 	if (sev == NULL)
 		return;
 
-	/*
-	 * If the event has been added to the fh->subscribed list, but its
-	 * add op has not completed yet elems will be 0, treat this as
-	 * not being subscribed.
-	 */
-	if (!sev->elems)
-		return;
-
 	/* Increase event sequence number on fh. */
 	fh->sequence++;
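
For reference (not part of this change): once the hunks below make a
subscription visible on fh->subscribed only after its add op has succeeded,
no half-initialised entry can reach this function, so the check removed above
is no longer needed. Event producers are unchanged; a hypothetical driver
(my_* names invented) keeps queuing events as before:

	#include <linux/videodev2.h>
	#include <media/v4l2-dev.h>
	#include <media/v4l2-event.h>

	/* hypothetical helper: announce end-of-stream to every subscriber */
	static void my_signal_eos(struct video_device *vdev)
	{
		static const struct v4l2_event ev = {
			.type = V4L2_EVENT_EOS,
		};

		/* fans out through __v4l2_event_queue_fh() for each fh that
		 * subscribed V4L2_EVENT_EOS */
		v4l2_event_queue(vdev, &ev);
	}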
@@ -212,6 +204,7 @@ int v4l2_event_subscribe(struct v4l2_fh *fh,
 	struct v4l2_subscribed_event *sev, *found_ev;
 	unsigned long flags;
 	unsigned i;
+	int ret = 0;
 
 	if (sub->type == V4L2_EVENT_ALL)
 		return -EINVAL;
@@ -229,31 +222,36 @@ int v4l2_event_subscribe(struct v4l2_fh *fh,
 	sev->flags = sub->flags;
 	sev->fh = fh;
 	sev->ops = ops;
+	sev->elems = elems;
+
+	mutex_lock(&fh->subscribe_lock);
 
 	spin_lock_irqsave(&fh->vdev->fh_lock, flags);
 	found_ev = v4l2_event_subscribed(fh, sub->type, sub->id);
-	if (!found_ev)
-		list_add(&sev->list, &fh->subscribed);
 	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
 
 	if (found_ev) {
+		/* Already listening */
 		kfree(sev);
-		return 0; /* Already listening */
+		goto out_unlock;
 	}
 
 	if (sev->ops && sev->ops->add) {
-		int ret = sev->ops->add(sev, elems);
+		ret = sev->ops->add(sev, elems);
 		if (ret) {
-			sev->ops = NULL;
-			v4l2_event_unsubscribe(fh, sub);
-			return ret;
+			kfree(sev);
+			goto out_unlock;
		}
	}
 
-	/* Mark as ready for use */
-	sev->elems = elems;
+	spin_lock_irqsave(&fh->vdev->fh_lock, flags);
+	list_add(&sev->list, &fh->subscribed);
+	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
 
-	return 0;
+out_unlock:
+	mutex_unlock(&fh->subscribe_lock);
+
+	return ret;
 }
 EXPORT_SYMBOL_GPL(v4l2_event_subscribe);
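
For context (not part of this patch), v4l2_event_subscribe() is normally
reached from a driver's subscribe ioctl op. A hypothetical wiring, with my_*
names invented and V4L2_EVENT_EOS chosen arbitrarily, could look like:

	#include <media/v4l2-ctrls.h>
	#include <media/v4l2-event.h>
	#include <media/v4l2-ioctl.h>

	static int my_subscribe_event(struct v4l2_fh *fh,
				      const struct v4l2_event_subscription *sub)
	{
		switch (sub->type) {
		case V4L2_EVENT_CTRL:
			/* control events come with their own add/del ops */
			return v4l2_ctrl_subscribe_event(fh, sub);
		case V4L2_EVENT_EOS:
			/* queue depth of 2 events, no per-subscription ops */
			return v4l2_event_subscribe(fh, sub, 2, NULL);
		default:
			return -EINVAL;
		}
	}

	static const struct v4l2_ioctl_ops my_ioctl_ops = {
		.vidioc_subscribe_event   = my_subscribe_event,
		.vidioc_unsubscribe_event = v4l2_event_unsubscribe,
	};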
@@ -292,6 +290,8 @@ int v4l2_event_unsubscribe(struct v4l2_fh *fh,
 		return 0;
 	}
 
+	mutex_lock(&fh->subscribe_lock);
+
 	spin_lock_irqsave(&fh->vdev->fh_lock, flags);
 
 	sev = v4l2_event_subscribed(fh, sub->type, sub->id);
@@ -310,6 +310,7 @@ int v4l2_event_unsubscribe(struct v4l2_fh *fh,
 		sev->ops->del(sev);
 
 	kfree(sev);
+	mutex_unlock(&fh->subscribe_lock);
 
 	return 0;
 }
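
The serialisation also matters to ops implementations: with fh->subscribe_lock
held around both paths, del is only ever called after add has returned. A
hypothetical subscription that arms an interrupt in add and disarms it in del
(my_* names and helpers invented) depends on exactly that ordering:

	#include <linux/types.h>
	#include <media/v4l2-dev.h>
	#include <media/v4l2-event.h>

	struct my_dev;                                        /* hypothetical driver state */
	int my_arm_event_irq(struct my_dev *dev, u32 id);     /* hypothetical helpers */
	void my_disarm_event_irq(struct my_dev *dev, u32 id);

	static int my_sev_add(struct v4l2_subscribed_event *sev, unsigned elems)
	{
		struct my_dev *dev = video_get_drvdata(sev->fh->vdev);

		/* cannot race with my_sev_del(): both run under fh->subscribe_lock */
		return my_arm_event_irq(dev, sev->id);
	}

	static void my_sev_del(struct v4l2_subscribed_event *sev)
	{
		struct my_dev *dev = video_get_drvdata(sev->fh->vdev);

		my_disarm_event_irq(dev, sev->id);
	}

	static const struct v4l2_subscribed_event_ops my_sev_ops = {
		.add = my_sev_add,
		.del = my_sev_del,
	};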
--- a/drivers/media/v4l2-core/v4l2-fh.c
+++ b/drivers/media/v4l2-core/v4l2-fh.c
@@ -49,6 +49,7 @@ void v4l2_fh_init(struct v4l2_fh *fh, struct video_device *vdev)
 	INIT_LIST_HEAD(&fh->available);
 	INIT_LIST_HEAD(&fh->subscribed);
 	fh->sequence = -1;
+	mutex_init(&fh->subscribe_lock);
 }
 EXPORT_SYMBOL_GPL(v4l2_fh_init);
@@ -93,6 +94,7 @@ void v4l2_fh_exit(struct v4l2_fh *fh)
 	if (fh->vdev == NULL)
 		return;
 
 	v4l2_event_unsubscribe_all(fh);
+	mutex_destroy(&fh->subscribe_lock);
 	fh->vdev = NULL;
 }
 EXPORT_SYMBOL_GPL(v4l2_fh_exit);
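
The lock thus shares the file handle's lifetime. In a driver that manages
struct v4l2_fh by hand, the flow is roughly the sketch below (my_* names
invented; many drivers simply use v4l2_fh_open()/v4l2_fh_release()):

	#include <linux/fs.h>
	#include <linux/slab.h>
	#include <media/v4l2-dev.h>
	#include <media/v4l2-fh.h>

	static int my_open(struct file *file)
	{
		struct video_device *vdev = video_devdata(file);
		struct v4l2_fh *fh = kzalloc(sizeof(*fh), GFP_KERNEL);

		if (!fh)
			return -ENOMEM;
		v4l2_fh_init(fh, vdev);   /* mutex_init(&fh->subscribe_lock) happens here */
		file->private_data = fh;
		v4l2_fh_add(fh);
		return 0;
	}

	static int my_release(struct file *file)
	{
		struct v4l2_fh *fh = file->private_data;

		v4l2_fh_del(fh);
		v4l2_fh_exit(fh);         /* unsubscribes all events, destroys the lock */
		kfree(fh);
		file->private_data = NULL;
		return 0;
	}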
--- a/include/media/v4l2-fh.h
+++ b/include/media/v4l2-fh.h
@@ -43,6 +43,7 @@ struct v4l2_fh {
 	wait_queue_head_t wait;
 	struct list_head subscribed; /* Subscribed events */
 	struct list_head available; /* Dequeueable event */
+	struct mutex subscribe_lock;
 	unsigned int navailable;
 	u32 sequence;