Commit e8b6e76f authored by Gurchetan Singh, committed by Gerd Hoffmann

drm/virtio: implement context init: plumb {base_fence_ctx, ring_idx} to virtio_gpu_fence_alloc

These were defined in the previous commit. We'll need these
parameters when allocating a dma_fence. The use case for this
is multiple synchronization timelines.

The maximum number of timelines per 3D instance will be 32. Usually,
only 2 are needed -- one for CPU commands, and another for GPU
commands.

As such, every dma_fence allocation site must now specify these
parameters explicitly.
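As a minimal sketch of intent (not part of this patch's diff; the
derivation of the per-ring context by simple addition is an assumption,
flagged in the comments), the allocator could fold the two parameters
into the dma_fence context like this:

#include "virtgpu_drv.h"	/* virtio_gpu_fence, fence driver types */

/*
 * Hedged sketch of the body in virtgpu_fence.c, not this patch's diff:
 * assumes the per-ring timeline is derived as base_fence_ctx + ring_idx,
 * with the seqno assigned later at emit time.
 */
struct virtio_gpu_fence *virtio_gpu_fence_alloc(struct virtio_gpu_device *vgdev,
						uint64_t base_fence_ctx,
						uint32_t ring_idx)
{
	uint64_t fence_context = base_fence_ctx + ring_idx;	/* assumption */
	struct virtio_gpu_fence_driver *drv = &vgdev->fence_drv;
	struct virtio_gpu_fence *fence = kzalloc(sizeof(*fence), GFP_KERNEL);

	if (!fence)
		return NULL;

	fence->drv = drv;
	/* One dma_fence context per {3D instance, ring} pair. */
	dma_fence_init(&fence->f, &virtio_gpu_fence_ops, &drv->lock,
		       fence_context, 0);
	return fence;
}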

vgdev->fence_drv.context is the "default" fence context for 2D mode
and old userspace.
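For illustration, a caller-side sketch; example_alloc_ring_fence() and
the vfpriv->base_fence_ctx field are hypothetical here, assuming a base
context reserved with dma_fence_context_alloc() elsewhere in the series:

/*
 * Hedged sketch of caller behaviour; the helper and the
 * vfpriv->base_fence_ctx field are hypothetical.
 */
static struct virtio_gpu_fence *
example_alloc_ring_fence(struct virtio_gpu_device *vgdev,
			 struct virtio_gpu_fpriv *vfpriv, uint32_t ring_idx)
{
	if (vfpriv && vfpriv->base_fence_ctx)
		/* Context-init-aware userspace: one timeline per ring,
		 * up to the 32-ring limit described above. */
		return virtio_gpu_fence_alloc(vgdev, vfpriv->base_fence_ctx,
					      ring_idx);

	/* 2D mode and old userspace: driver-wide default timeline. */
	return virtio_gpu_fence_alloc(vgdev, vgdev->fence_drv.context, 0);
}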
Signed-off-by: Gurchetan Singh <gurchetansingh@chromium.org>
Acked-by: Lingfeng Yang <lfy@google.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20210921232024.817-8-gurchetansingh@chromium.org
Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
parent 7547675b
drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -426,8 +426,9 @@ struct drm_plane *virtio_gpu_plane_init(struct virtio_gpu_device *vgdev,
 				       int index);
 
 /* virtgpu_fence.c */
-struct virtio_gpu_fence *virtio_gpu_fence_alloc(
-	struct virtio_gpu_device *vgdev);
+struct virtio_gpu_fence *virtio_gpu_fence_alloc(struct virtio_gpu_device *vgdev,
+						uint64_t base_fence_ctx,
+						uint32_t ring_idx);
 void virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev,
 			  struct virtio_gpu_ctrl_hdr *cmd_hdr,
 			  struct virtio_gpu_fence *fence);
drivers/gpu/drm/virtio/virtgpu_fence.c
@@ -71,7 +71,9 @@ static const struct dma_fence_ops virtio_gpu_fence_ops = {
 	.timeline_value_str  = virtio_gpu_timeline_value_str,
 };
 
-struct virtio_gpu_fence *virtio_gpu_fence_alloc(struct virtio_gpu_device *vgdev)
+struct virtio_gpu_fence *virtio_gpu_fence_alloc(struct virtio_gpu_device *vgdev,
+						uint64_t base_fence_ctx,
+						uint32_t ring_idx)
 {
 	struct virtio_gpu_fence_driver *drv = &vgdev->fence_drv;
 	struct virtio_gpu_fence *fence = kzalloc(sizeof(struct virtio_gpu_fence),
drivers/gpu/drm/virtio/virtgpu_ioctl.c
@@ -173,7 +173,7 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
 			goto out_memdup;
 	}
 
-	out_fence = virtio_gpu_fence_alloc(vgdev);
+	out_fence = virtio_gpu_fence_alloc(vgdev, vgdev->fence_drv.context, 0);
 	if(!out_fence) {
 		ret = -ENOMEM;
 		goto out_unresv;
@@ -288,7 +288,7 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
 	if (params.size == 0)
 		params.size = PAGE_SIZE;
 
-	fence = virtio_gpu_fence_alloc(vgdev);
+	fence = virtio_gpu_fence_alloc(vgdev, vgdev->fence_drv.context, 0);
 	if (!fence)
 		return -ENOMEM;
 	ret = virtio_gpu_object_create(vgdev, &params, &qobj, fence);
@@ -367,7 +367,7 @@ static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
 	if (ret != 0)
 		goto err_put_free;
 
-	fence = virtio_gpu_fence_alloc(vgdev);
+	fence = virtio_gpu_fence_alloc(vgdev, vgdev->fence_drv.context, 0);
 	if (!fence) {
 		ret = -ENOMEM;
 		goto err_unlock;
@@ -427,7 +427,8 @@ static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
 			goto err_put_free;
 
 		ret = -ENOMEM;
-		fence = virtio_gpu_fence_alloc(vgdev);
+		fence = virtio_gpu_fence_alloc(vgdev, vgdev->fence_drv.context,
+					       0);
 		if (!fence)
 			goto err_unlock;
drivers/gpu/drm/virtio/virtgpu_plane.c
@@ -256,7 +256,8 @@ static int virtio_gpu_plane_prepare_fb(struct drm_plane *plane,
 		return 0;
 
 	if (bo->dumb && (plane->state->fb != new_state->fb)) {
-		vgfb->fence = virtio_gpu_fence_alloc(vgdev);
+		vgfb->fence = virtio_gpu_fence_alloc(vgdev, vgdev->fence_drv.context,
+						     0);
 		if (!vgfb->fence)
 			return -ENOMEM;
 	}