Commit 6ebe8661 authored by Chia-I Wu, committed by Gerd Hoffmann

drm/virtio: move locking into virtio_gpu_queue_ctrl_sgs

Signed-off-by: Chia-I Wu <olvaffe@gmail.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20200205181955.202485-8-olvaffe@gmail.com
Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
parent db2e2072
@@ -318,18 +318,43 @@ static struct sg_table *vmalloc_to_sgt(char *data, uint32_t size, int *sg_ents)
 	return sgt;
 }
 
-static bool virtio_gpu_queue_ctrl_sgs_locked(struct virtio_gpu_device *vgdev,
-					     struct virtio_gpu_vbuffer *vbuf,
-					     struct scatterlist **sgs,
-					     int outcnt,
-					     int incnt)
+static bool virtio_gpu_queue_ctrl_sgs(struct virtio_gpu_device *vgdev,
+				      struct virtio_gpu_vbuffer *vbuf,
+				      struct virtio_gpu_fence *fence,
+				      int elemcnt,
+				      struct scatterlist **sgs,
+				      int outcnt,
+				      int incnt)
 {
 	struct virtqueue *vq = vgdev->ctrlq.vq;
 	bool notify = false;
 	int ret;
 
-	if (!vgdev->vqs_ready)
+again:
+	spin_lock(&vgdev->ctrlq.qlock);
+
+	if (vq->num_free < elemcnt) {
+		spin_unlock(&vgdev->ctrlq.qlock);
+		wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= elemcnt);
+		goto again;
+	}
+
+	/* now that the position of the vbuf in the virtqueue is known, we can
+	 * finally set the fence id
+	 */
+	if (fence) {
+		virtio_gpu_fence_emit(vgdev, virtio_gpu_vbuf_ctrl_hdr(vbuf),
+				      fence);
+		if (vbuf->objs) {
+			virtio_gpu_array_add_fence(vbuf->objs, &fence->f);
+			virtio_gpu_array_unlock_resv(vbuf->objs);
+		}
+	}
+
+	if (!vgdev->vqs_ready) {
+		spin_unlock(&vgdev->ctrlq.qlock);
 		return notify;
+	}
 
 	ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
 	WARN_ON(ret);
@@ -338,6 +363,8 @@ static bool virtio_gpu_queue_ctrl_sgs_locked(struct virtio_gpu_device *vgdev,
 
 	notify = virtqueue_kick_prepare(vq);
 
+	spin_unlock(&vgdev->ctrlq.qlock);
+
 	return notify;
 }
 
@@ -345,7 +372,6 @@ static void virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
 					       struct virtio_gpu_vbuffer *vbuf,
 					       struct virtio_gpu_fence *fence)
 {
-	struct virtqueue *vq = vgdev->ctrlq.vq;
 	struct scatterlist *sgs[3], vcmd, vout, vresp;
 	struct sg_table *sgt = NULL;
 	int elemcnt = 0, outcnt = 0, incnt = 0;
@@ -387,34 +413,8 @@ static void virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
 		incnt++;
 	}
 
-again:
-	spin_lock(&vgdev->ctrlq.qlock);
-
-	/*
-	 * Make sure we have enouth space in the virtqueue. If not
-	 * wait here until we have.
-	 *
-	 * Without that virtio_gpu_queue_ctrl_buffer_nolock might have
-	 * to wait for free space, which can result in fence ids being
-	 * submitted out-of-order.
-	 */
-	if (vq->num_free < elemcnt) {
-		spin_unlock(&vgdev->ctrlq.qlock);
-		wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= elemcnt);
-		goto again;
-	}
-
-	if (fence) {
-		virtio_gpu_fence_emit(vgdev, virtio_gpu_vbuf_ctrl_hdr(vbuf),
-				      fence);
-		if (vbuf->objs) {
-			virtio_gpu_array_add_fence(vbuf->objs, &fence->f);
-			virtio_gpu_array_unlock_resv(vbuf->objs);
-		}
-	}
-
-	notify = virtio_gpu_queue_ctrl_sgs_locked(vgdev, vbuf, sgs, outcnt,
-						  incnt);
-
-	spin_unlock(&vgdev->ctrlq.qlock);
-
+	notify = virtio_gpu_queue_ctrl_sgs(vgdev, vbuf, fence, elemcnt, sgs,
+					   outcnt, incnt);
 	if (notify) {
 		if (vgdev->disable_notify)
 			vgdev->pending_notify = true;
...
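The pattern the patch adopts is: take ctrlq.qlock, wait (dropping the lock) until the virtqueue has elemcnt free slots, and only then emit the fence and add the buffer, all before releasing the lock. Because fence emission and submission now share one critical section, a concurrent caller can no longer be handed a smaller fence id and still land behind us in the queue. Here is a minimal userspace sketch of that invariant, using a pthread mutex/condvar in place of the kernel's spin_lock()/wait_event() retry loop; the names below (fake_ctrlq, queue_cmd, complete_cmd) are illustrative only and not part of the driver.

#include <pthread.h>

/* Illustrative stand-in for the driver's control queue state. */
struct fake_ctrlq {
	pthread_mutex_t lock;		/* plays the role of ctrlq.qlock */
	pthread_cond_t space_freed;	/* plays the role of ctrlq.ack_queue */
	int num_free;			/* free descriptor slots */
	unsigned int next_fence_id;	/* monotonically increasing fence id */
};

/* Queue one command occupying elemcnt slots; returns its fence id. */
static unsigned int queue_cmd(struct fake_ctrlq *q, int elemcnt)
{
	unsigned int id;

	pthread_mutex_lock(&q->lock);

	/* Wait for room *before* emitting the fence, under the same lock
	 * that covers submission, so fence ids reach the queue in
	 * emission order. */
	while (q->num_free < elemcnt)
		pthread_cond_wait(&q->space_freed, &q->lock);

	id = q->next_fence_id++;	/* stands in for virtio_gpu_fence_emit() */
	q->num_free -= elemcnt;		/* stands in for virtqueue_add_sgs() */

	pthread_mutex_unlock(&q->lock);
	return id;
}

/* Completion side: free the slots and wake any waiter. */
static void complete_cmd(struct fake_ctrlq *q, int elemcnt)
{
	pthread_mutex_lock(&q->lock);
	q->num_free += elemcnt;
	pthread_cond_broadcast(&q->space_freed);
	pthread_mutex_unlock(&q->lock);
}

The kernel version needs the unlock/wait_event()/goto-again loop because it cannot sleep while holding a spinlock; pthread_cond_wait() expresses the same unlock-sleep-relock sequence atomically. Before this patch the caller took qlock, waited for space, emitted the fence, and then called the _locked variant; afterwards the lock, the space check, and the fence emission all live inside virtio_gpu_queue_ctrl_sgs(), so every call site gets the ordering guarantee automatically.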