Commit da758d51 authored by Gerd Hoffmann

drm/virtio: rework virtio_gpu_execbuffer_ioctl fencing

Rework fencing workflow, starting with virtio_gpu_execbuffer_ioctl.
Stop using the ttm helpers; use the virtio_gpu_array_* helpers (which work
on the reservation objects directly) instead.

Also store the object array in struct virtio_gpu_vbuffer, so we
explicitly keep a reference to all buffers used, instead of depending
on ttm_bo_put() checking whether the object is actually idle before
releasing it.

New workflow:

 (1) All gem objects needed by a command are added to a
     virtio_gpu_object_array.
 (2) All reservation objects will be locked (virtio_gpu_array_lock_resv).
 (3) virtio_gpu_fence_emit() completes fence initialization.
 (4) fence gets added to the objects, reservation objects are unlocked
     (virtio_gpu_array_add_fence, virtio_gpu_array_unlock_resv).
 (5) virtio command is submitted to the host.
 (6) The completion callback (virtio_gpu_dequeue_ctrl_func)
     will drop object references and free virtio_gpu_object_array.
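
For orientation, the steps above can be read as one condensed C sketch
(not part of the patch itself): error handling, the sync_file/fence-fd
plumbing of the real ioctl, and locking details are omitted, the function
name execbuffer_sketch is made up for illustration, and
virtio_gpu_fence_alloc() is assumed as the fence constructor. The helper
names are the ones used by this series; see the hunks below for the real
code.

static int execbuffer_sketch(struct virtio_gpu_device *vgdev,
                             struct drm_file *file,
                             struct virtio_gpu_fpriv *vfpriv,
                             void *buf, uint32_t size,
                             uint32_t *bo_handles, uint32_t nr_bos)
{
        struct virtio_gpu_object_array *objs;
        struct virtio_gpu_fence *out_fence;
        int ret;

        /* (1) collect the gem objects the command refers to */
        objs = virtio_gpu_array_from_handles(file, bo_handles, nr_bos);
        if (!objs)
                return -ENOENT;

        /* (2) lock all reservation objects */
        ret = virtio_gpu_array_lock_resv(objs);
        if (ret) {
                virtio_gpu_array_put_free(objs);
                return ret;
        }

        out_fence = virtio_gpu_fence_alloc(vgdev);      /* assumed constructor */

        /*
         * (3)-(5) queue the command; on this path the driver emits the
         * fence, adds it to the objects and unlocks the reservation
         * objects (see virtio_gpu_queue_fenced_ctrl_buffer below).
         */
        virtio_gpu_cmd_submit(vgdev, buf, size, vfpriv->ctx_id, objs, out_fence);

        /*
         * (6) nothing to clean up here: the completion callback
         * (virtio_gpu_dequeue_ctrl_func) drops the object references and
         * frees the virtio_gpu_object_array via virtio_gpu_array_put_free().
         */
        return 0;
}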

v6: rewrite most of the patch.
Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
Reviewed-by: Chia-I Wu <olvaffe@gmail.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20190829103301.3539-9-kraxel@redhat.com
parent 98abe21d
@@ -121,9 +121,9 @@ struct virtio_gpu_vbuffer {
        char *resp_buf;
        int resp_size;
        virtio_gpu_resp_cb resp_cb;
+       struct virtio_gpu_object_array *objs;
        struct list_head list;
 };
@@ -318,7 +318,9 @@ void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
                                            uint32_t resource_id);
 void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
                           void *data, uint32_t data_size,
-                          uint32_t ctx_id, struct virtio_gpu_fence *fence);
+                          uint32_t ctx_id,
+                          struct virtio_gpu_object_array *objs,
+                          struct virtio_gpu_fence *fence);
 void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
                                          uint32_t resource_id, uint32_t ctx_id,
                                          uint64_t offset, uint32_t level,
...
@@ -107,16 +107,11 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
        struct drm_virtgpu_execbuffer *exbuf = data;
        struct virtio_gpu_device *vgdev = dev->dev_private;
        struct virtio_gpu_fpriv *vfpriv = drm_file->driver_priv;
-       struct drm_gem_object *gobj;
        struct virtio_gpu_fence *out_fence;
-       struct virtio_gpu_object *qobj;
        int ret;
        uint32_t *bo_handles = NULL;
        void __user *user_bo_handles = NULL;
-       struct list_head validate_list;
-       struct ttm_validate_buffer *buflist = NULL;
-       int i;
-       struct ww_acquire_ctx ticket;
+       struct virtio_gpu_object_array *buflist = NULL;
        struct sync_file *sync_file;
        int in_fence_fd = exbuf->fence_fd;
        int out_fence_fd = -1;
@@ -157,15 +152,10 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
                return out_fence_fd;
        }
-       INIT_LIST_HEAD(&validate_list);
        if (exbuf->num_bo_handles) {
                bo_handles = kvmalloc_array(exbuf->num_bo_handles,
                                            sizeof(uint32_t), GFP_KERNEL);
-               buflist = kvmalloc_array(exbuf->num_bo_handles,
-                                        sizeof(struct ttm_validate_buffer),
-                                        GFP_KERNEL | __GFP_ZERO);
-               if (!bo_handles || !buflist) {
+               if (!bo_handles) {
                        ret = -ENOMEM;
                        goto out_unused_fd;
                }
@@ -177,25 +167,21 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
                        goto out_unused_fd;
                }
-               for (i = 0; i < exbuf->num_bo_handles; i++) {
-                       gobj = drm_gem_object_lookup(drm_file, bo_handles[i]);
-                       if (!gobj) {
-                               ret = -ENOENT;
-                               goto out_unused_fd;
-                       }
-                       qobj = gem_to_virtio_gpu_obj(gobj);
-                       buflist[i].bo = &qobj->tbo;
-                       list_add(&buflist[i].head, &validate_list);
+               buflist = virtio_gpu_array_from_handles(drm_file, bo_handles,
+                                                       exbuf->num_bo_handles);
+               if (!buflist) {
+                       ret = -ENOENT;
+                       goto out_unused_fd;
                }
                kvfree(bo_handles);
                bo_handles = NULL;
        }
-       ret = virtio_gpu_object_list_validate(&ticket, &validate_list);
-       if (ret)
-               goto out_free;
+       if (buflist) {
+               ret = virtio_gpu_array_lock_resv(buflist);
+               if (ret)
+                       goto out_unused_fd;
+       }
        buf = memdup_user(u64_to_user_ptr(exbuf->command), exbuf->size);
        if (IS_ERR(buf)) {
@@ -222,24 +208,18 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
        }
        virtio_gpu_cmd_submit(vgdev, buf, exbuf->size,
-                             vfpriv->ctx_id, out_fence);
-       ttm_eu_fence_buffer_objects(&ticket, &validate_list, &out_fence->f);
-       /* fence the command bo */
-       virtio_gpu_unref_list(&validate_list);
-       kvfree(buflist);
+                             vfpriv->ctx_id, buflist, out_fence);
        return 0;
 out_memdup:
        kfree(buf);
 out_unresv:
-       ttm_eu_backoff_reservation(&ticket, &validate_list);
-out_free:
-       virtio_gpu_unref_list(&validate_list);
+       if (buflist)
+               virtio_gpu_array_unlock_resv(buflist);
 out_unused_fd:
        kvfree(bo_handles);
-       kvfree(buflist);
+       if (buflist)
+               virtio_gpu_array_put_free(buflist);
        if (out_fence_fd >= 0)
                put_unused_fd(out_fence_fd);
...
@@ -192,7 +192,7 @@ void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
        } while (!virtqueue_enable_cb(vgdev->ctrlq.vq));
        spin_unlock(&vgdev->ctrlq.qlock);
-       list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
+       list_for_each_entry(entry, &reclaim_list, list) {
                resp = (struct virtio_gpu_ctrl_hdr *)entry->resp_buf;
                trace_virtio_gpu_cmd_response(vgdev->ctrlq.vq, resp);
@@ -219,14 +219,18 @@ void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
                }
                if (entry->resp_cb)
                        entry->resp_cb(vgdev, entry);
-               list_del(&entry->list);
-               free_vbuf(vgdev, entry);
        }
        wake_up(&vgdev->ctrlq.ack_queue);
        if (fence_id)
                virtio_gpu_fence_event_process(vgdev, fence_id);
+       list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
+               if (entry->objs)
+                       virtio_gpu_array_put_free(entry->objs);
+               list_del(&entry->list);
+               free_vbuf(vgdev, entry);
+       }
 }
 void virtio_gpu_dequeue_cursor_func(struct work_struct *work)
@@ -337,6 +341,10 @@ static void virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
        if (fence)
                virtio_gpu_fence_emit(vgdev, hdr, fence);
+       if (vbuf->objs) {
+               virtio_gpu_array_add_fence(vbuf->objs, &fence->f);
+               virtio_gpu_array_unlock_resv(vbuf->objs);
+       }
        notify = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf);
        spin_unlock(&vgdev->ctrlq.qlock);
        if (notify)
@@ -940,7 +948,9 @@ void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
 void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
                           void *data, uint32_t data_size,
-                          uint32_t ctx_id, struct virtio_gpu_fence *fence)
+                          uint32_t ctx_id,
+                          struct virtio_gpu_object_array *objs,
+                          struct virtio_gpu_fence *fence)
 {
        struct virtio_gpu_cmd_submit *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;
@@ -950,6 +960,7 @@ void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
        vbuf->data_buf = data;
        vbuf->data_size = data_size;
+       vbuf->objs = objs;
        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SUBMIT_3D);
        cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
...