Commit e2324300 authored by Gerd Hoffmann

drm/virtio: rework virtio_gpu_object_create fencing

Rework the fencing workflow.  Stop using the ttm helpers; use the
virtio_gpu_array_* helpers instead.

Because the gem reservation object is used, it is initialized and ready
for use before ttm_bo_init is called.  So we can simply use the standard
fencing workflow and drop the tricky logic that checks whether the
command is still in flight.

v6: rewrite most of the patch.
Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
Reviewed-by: Chia-I Wu <olvaffe@gmail.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20190829103301.3539-10-kraxel@redhat.com
parent da758d51
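
For orientation, here is a condensed view of the reworked create path, as established by the virtgpu_object.c hunk below (all identifiers are taken from the diff itself; surrounding allocation and ttm setup trimmed):

        struct virtio_gpu_object_array *objs = NULL;

        if (fence) {
                /* One-entry array holding the new BO; its gem reservation
                 * object is already initialized, so it can be locked before
                 * the create command is sent. */
                ret = -ENOMEM;
                objs = virtio_gpu_array_alloc(1);
                if (!objs)
                        goto err_put_id;
                virtio_gpu_array_add_obj(objs, &bo->gem_base);

                ret = virtio_gpu_array_lock_resv(objs);
                if (ret != 0)
                        goto err_put_objs;
        }

        /* The array rides along with the create command (vbuf->objs). */
        if (params->virgl)
                virtio_gpu_cmd_resource_create_3d(vgdev, bo, params, objs, fence);
        else
                virtio_gpu_cmd_create_resource(vgdev, bo, params, objs, fence);

On failure the new error labels unwind in reverse order: err_put_objs drops the array, err_put_id releases the resource id, err_free_gem frees the object.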
drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -274,6 +274,7 @@ void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev);
 void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
                                     struct virtio_gpu_object *bo,
                                     struct virtio_gpu_object_params *params,
+                                    struct virtio_gpu_object_array *objs,
                                     struct virtio_gpu_fence *fence);
 void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
                                    uint32_t resource_id);
@@ -336,6 +337,7 @@ void
 virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
                                   struct virtio_gpu_object *bo,
                                   struct virtio_gpu_object_params *params,
+                                  struct virtio_gpu_object_array *objs,
                                   struct virtio_gpu_fence *fence);
 void virtio_gpu_ctrl_ack(struct virtqueue *vq);
 void virtio_gpu_cursor_ack(struct virtqueue *vq);
drivers/gpu/drm/virtio/virtgpu_object.c
@@ -103,6 +103,7 @@ int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
                              struct virtio_gpu_object **bo_ptr,
                              struct virtio_gpu_fence *fence)
 {
+        struct virtio_gpu_object_array *objs = NULL;
         struct virtio_gpu_object *bo;
         size_t acc_size;
         int ret;
@@ -116,23 +117,34 @@ int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
         if (bo == NULL)
                 return -ENOMEM;
         ret = virtio_gpu_resource_id_get(vgdev, &bo->hw_res_handle);
-        if (ret < 0) {
-                kfree(bo);
-                return ret;
-        }
+        if (ret < 0)
+                goto err_free_gem;
+
         params->size = roundup(params->size, PAGE_SIZE);
         ret = drm_gem_object_init(vgdev->ddev, &bo->gem_base, params->size);
-        if (ret != 0) {
-                virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle);
-                kfree(bo);
-                return ret;
-        }
+        if (ret != 0)
+                goto err_put_id;
+
         bo->dumb = params->dumb;
 
+        if (fence) {
+                ret = -ENOMEM;
+                objs = virtio_gpu_array_alloc(1);
+                if (!objs)
+                        goto err_put_id;
+                virtio_gpu_array_add_obj(objs, &bo->gem_base);
+
+                ret = virtio_gpu_array_lock_resv(objs);
+                if (ret != 0)
+                        goto err_put_objs;
+        }
+
         if (params->virgl) {
-                virtio_gpu_cmd_resource_create_3d(vgdev, bo, params, fence);
+                virtio_gpu_cmd_resource_create_3d(vgdev, bo, params,
+                                                  objs, fence);
         } else {
-                virtio_gpu_cmd_create_resource(vgdev, bo, params, fence);
+                virtio_gpu_cmd_create_resource(vgdev, bo, params,
+                                               objs, fence);
         }
 
         virtio_gpu_init_ttm_placement(bo);
@@ -145,40 +157,16 @@ int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
         if (ret != 0)
                 return ret;
 
-        if (fence) {
-                struct virtio_gpu_fence_driver *drv = &vgdev->fence_drv;
-                struct list_head validate_list;
-                struct ttm_validate_buffer mainbuf;
-                struct ww_acquire_ctx ticket;
-                unsigned long irq_flags;
-                bool signaled;
-
-                INIT_LIST_HEAD(&validate_list);
-                memset(&mainbuf, 0, sizeof(struct ttm_validate_buffer));
-
-                /* use a gem reference since unref list undoes them */
-                drm_gem_object_get(&bo->gem_base);
-                mainbuf.bo = &bo->tbo;
-                list_add(&mainbuf.head, &validate_list);
-
-                ret = virtio_gpu_object_list_validate(&ticket, &validate_list);
-                if (ret == 0) {
-                        spin_lock_irqsave(&drv->lock, irq_flags);
-                        signaled = virtio_fence_signaled(&fence->f);
-                        if (!signaled)
-                                /* virtio create command still in flight */
-                                ttm_eu_fence_buffer_objects(&ticket, &validate_list,
-                                                            &fence->f);
-                        spin_unlock_irqrestore(&drv->lock, irq_flags);
-                        if (signaled)
-                                /* virtio create command finished */
-                                ttm_eu_backoff_reservation(&ticket, &validate_list);
-                }
-                virtio_gpu_unref_list(&validate_list);
-        }
-
         *bo_ptr = bo;
         return 0;
+
+err_put_objs:
+        virtio_gpu_array_put_free(objs);
+err_put_id:
+        virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle);
+err_free_gem:
+        kfree(bo);
+        return ret;
 }
 
 void virtio_gpu_object_kunmap(struct virtio_gpu_object *bo)
drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -396,6 +396,7 @@ static void virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
 void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
                                     struct virtio_gpu_object *bo,
                                     struct virtio_gpu_object_params *params,
+                                    struct virtio_gpu_object_array *objs,
                                     struct virtio_gpu_fence *fence)
 {
         struct virtio_gpu_resource_create_2d *cmd_p;
@@ -403,6 +404,7 @@ void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
 
         cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
         memset(cmd_p, 0, sizeof(*cmd_p));
+        vbuf->objs = objs;
 
         cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D);
         cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
@@ -869,6 +871,7 @@ void
 virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
                                   struct virtio_gpu_object *bo,
                                   struct virtio_gpu_object_params *params,
+                                  struct virtio_gpu_object_array *objs,
                                   struct virtio_gpu_fence *fence)
 {
         struct virtio_gpu_resource_create_3d *cmd_p;
@@ -876,6 +879,7 @@ virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
 
         cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
         memset(cmd_p, 0, sizeof(*cmd_p));
+        vbuf->objs = objs;
 
         cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_3D);
         cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
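
Attaching the array via vbuf->objs only hands it to the virtqueue code; the actual fencing of the buffer object happens there once the command has been queued. A minimal sketch of that consumer side, assuming the virtio_gpu_array_add_fence()/virtio_gpu_array_unlock_resv() helpers introduced earlier in this series (this code is not part of the patch shown here):

        /*
         * Sketch only: in the fenced submission path, after the fence has
         * been emitted for the command, the attached array is expected to
         * be fenced and unlocked roughly like this.  Helper names are
         * assumptions taken from the virtio_gpu_object_array API added
         * earlier in the series.
         */
        if (vbuf->objs) {
                virtio_gpu_array_add_fence(vbuf->objs, &fence->f);
                virtio_gpu_array_unlock_resv(vbuf->objs);
        }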