Commit c84adb30 authored by David Stevens, committed by Gerd Hoffmann

drm/virtio: Support virtgpu exported resources

Add support for the UUID-based resource sharing mechanism to virtgpu. This
implements the new virtgpu commands and hooks them up to dma-buf's
get_uuid callback.
Signed-off-by: David Stevens <stevensd@chromium.org>
Acked-by: Michael S. Tsirkin <mst@redhat.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20200818071343.3461203-4-stevensd@chromium.org
Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
parent 592d9fba
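
For context, a sketch of the consumer side (the importer function below is hypothetical; virtio_dma_buf_get_uuid() is the real helper from <linux/virtio_dma_buf.h> added earlier in this series, and it returns -ENODEV when the buffer was not exported by a virtio driver or when UUID assignment failed):

#include <linux/dma-buf.h>
#include <linux/uuid.h>
#include <linux/virtio_dma_buf.h>

/*
 * Hypothetical importer: fetch the UUID the host assigned to the
 * exported virtgpu resource, so it can be handed to another virtio
 * device and resolved to the same host-side resource.
 */
static int example_lookup_exported_resource(struct dma_buf *buf)
{
        uuid_t uuid;
        int ret;

        ret = virtio_dma_buf_get_uuid(buf, &uuid);
        if (ret)
                return ret;

        /* send &uuid to the device in a device-specific request */
        return 0;
}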
drivers/gpu/drm/virtio/virtgpu_drv.c

@@ -165,6 +165,7 @@ static unsigned int features[] = {
         VIRTIO_GPU_F_VIRGL,
 #endif
         VIRTIO_GPU_F_EDID,
+        VIRTIO_GPU_F_RESOURCE_UUID,
 };

 static struct virtio_driver virtio_gpu_driver = {
         .feature_table = features,
@@ -202,6 +203,8 @@ static struct drm_driver driver = {
         .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
         .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
         .gem_prime_mmap = drm_gem_prime_mmap,
+        .gem_prime_export = virtgpu_gem_prime_export,
+        .gem_prime_import = virtgpu_gem_prime_import,
         .gem_prime_import_sg_table = virtgpu_gem_prime_import_sg_table,
         .gem_create_object = virtio_gpu_create_object,
drivers/gpu/drm/virtio/virtgpu_drv.h

@@ -49,6 +49,10 @@
 #define DRIVER_MINOR 1
 #define DRIVER_PATCHLEVEL 0

+#define UUID_INITIALIZING 0
+#define UUID_INITIALIZED 1
+#define UUID_INITIALIZATION_FAILED 2
+
 struct virtio_gpu_object_params {
         uint32_t format;
         uint32_t width;
@@ -71,6 +75,9 @@ struct virtio_gpu_object {
         uint32_t hw_res_handle;
         bool dumb;
         bool created;
+
+        int uuid_state;
+        uuid_t uuid;
 };

 #define gem_to_virtio_gpu_obj(gobj) \
         container_of((gobj), struct virtio_gpu_object, base.base)
@@ -200,6 +207,7 @@ struct virtio_gpu_device {
         bool has_virgl_3d;
         bool has_edid;
         bool has_indirect;
+        bool has_resource_assign_uuid;

         struct work_struct config_changed_work;
@@ -210,6 +218,9 @@ struct virtio_gpu_device {
         struct virtio_gpu_drv_capset *capsets;
         uint32_t num_capsets;
         struct list_head cap_cache;
+
+        /* protects resource state when exporting */
+        spinlock_t resource_export_lock;
 };

 struct virtio_gpu_fpriv {
@@ -336,6 +347,10 @@ void virtio_gpu_dequeue_fence_func(struct work_struct *work);

 void virtio_gpu_notify(struct virtio_gpu_device *vgdev);

+int
+virtio_gpu_cmd_resource_assign_uuid(struct virtio_gpu_device *vgdev,
+                                    struct virtio_gpu_object_array *objs);
+
 /* virtgpu_display.c */
 void virtio_gpu_modeset_init(struct virtio_gpu_device *vgdev);
 void virtio_gpu_modeset_fini(struct virtio_gpu_device *vgdev);
@@ -367,6 +382,12 @@ int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
 bool virtio_gpu_is_shmem(struct virtio_gpu_object *bo);

 /* virtgpu_prime.c */
+struct dma_buf *virtgpu_gem_prime_export(struct drm_gem_object *obj,
+                                         int flags);
+struct drm_gem_object *virtgpu_gem_prime_import(struct drm_device *dev,
+                                                struct dma_buf *buf);
+int virtgpu_gem_prime_get_uuid(struct drm_gem_object *obj,
+                               uuid_t *uuid);
 struct drm_gem_object *virtgpu_gem_prime_import_sg_table(
         struct drm_device *dev, struct dma_buf_attachment *attach,
         struct sg_table *sgt);
drivers/gpu/drm/virtio/virtgpu_kms.c

@@ -118,6 +118,7 @@ int virtio_gpu_init(struct drm_device *dev)
         vgdev->dev = dev->dev;

         spin_lock_init(&vgdev->display_info_lock);
+        spin_lock_init(&vgdev->resource_export_lock);
         ida_init(&vgdev->ctx_id_ida);
         ida_init(&vgdev->resource_ida);
         init_waitqueue_head(&vgdev->resp_wq);
@@ -146,6 +147,9 @@ int virtio_gpu_init(struct drm_device *dev)
         if (virtio_has_feature(vgdev->vdev, VIRTIO_RING_F_INDIRECT_DESC)) {
                 vgdev->has_indirect = true;
         }
+        if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_RESOURCE_UUID)) {
+                vgdev->has_resource_assign_uuid = true;
+        }

         DRM_INFO("features: %cvirgl %cedid\n",
                  vgdev->has_virgl_3d ? '+' : '-',
drivers/gpu/drm/virtio/virtgpu_prime.c

@@ -23,12 +23,102 @@
  */

 #include <drm/drm_prime.h>
+#include <linux/virtio_dma_buf.h>

 #include "virtgpu_drv.h"

-/* Empty Implementations as there should not be any other driver for a virtual
- * device that might share buffers with virtgpu
- */
+static int virtgpu_virtio_get_uuid(struct dma_buf *buf,
+                                   uuid_t *uuid)
+{
+        struct drm_gem_object *obj = buf->priv;
+        struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
+        struct virtio_gpu_device *vgdev = obj->dev->dev_private;
+
+        wait_event(vgdev->resp_wq, bo->uuid_state != UUID_INITIALIZING);
+        if (bo->uuid_state != UUID_INITIALIZED)
+                return -ENODEV;
+
+        uuid_copy(uuid, &bo->uuid);
+
+        return 0;
+}
+
+const struct virtio_dma_buf_ops virtgpu_dmabuf_ops = {
+        .ops = {
+                .cache_sgt_mapping = true,
+                .attach = virtio_dma_buf_attach,
+                .detach = drm_gem_map_detach,
+                .map_dma_buf = drm_gem_map_dma_buf,
+                .unmap_dma_buf = drm_gem_unmap_dma_buf,
+                .release = drm_gem_dmabuf_release,
+                .mmap = drm_gem_dmabuf_mmap,
+                .vmap = drm_gem_dmabuf_vmap,
+                .vunmap = drm_gem_dmabuf_vunmap,
+        },
+        .device_attach = drm_gem_map_attach,
+        .get_uuid = virtgpu_virtio_get_uuid,
+};
+
+struct dma_buf *virtgpu_gem_prime_export(struct drm_gem_object *obj,
+                                         int flags)
+{
+        struct dma_buf *buf;
+        struct drm_device *dev = obj->dev;
+        struct virtio_gpu_device *vgdev = dev->dev_private;
+        struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
+        struct virtio_gpu_object_array *objs;
+        int ret = 0;
+        DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+
+        if (vgdev->has_resource_assign_uuid) {
+                objs = virtio_gpu_array_alloc(1);
+                if (!objs)
+                        return ERR_PTR(-ENOMEM);
+                virtio_gpu_array_add_obj(objs, &bo->base.base);
+
+                ret = virtio_gpu_cmd_resource_assign_uuid(vgdev, objs);
+                if (ret)
+                        return ERR_PTR(ret);
+                virtio_gpu_notify(vgdev);
+        } else {
+                bo->uuid_state = UUID_INITIALIZATION_FAILED;
+        }
+
+        exp_info.ops = &virtgpu_dmabuf_ops.ops;
+        exp_info.size = obj->size;
+        exp_info.flags = flags;
+        exp_info.priv = obj;
+        exp_info.resv = obj->resv;
+
+        buf = virtio_dma_buf_export(&exp_info);
+        if (IS_ERR(buf))
+                return buf;
+
+        drm_dev_get(dev);
+        drm_gem_object_get(obj);
+
+        return buf;
+}
+
+struct drm_gem_object *virtgpu_gem_prime_import(struct drm_device *dev,
+                                                struct dma_buf *buf)
+{
+        struct drm_gem_object *obj;
+
+        if (buf->ops == &virtgpu_dmabuf_ops.ops) {
+                obj = buf->priv;
+                if (obj->dev == dev) {
+                        /*
+                         * Importing dmabuf exported from our own gem increases
+                         * refcount on gem itself instead of f_count of dmabuf.
+                         */
+                        drm_gem_object_get(obj);
+                        return obj;
+                }
+        }
+
+        return drm_gem_prime_import(dev, buf);
+}

 struct drm_gem_object *virtgpu_gem_prime_import_sg_table(
         struct drm_device *dev, struct dma_buf_attachment *attach,
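Note the split in the export path above: virtgpu_gem_prime_export() only queues the RESOURCE_ASSIGN_UUID command and returns without waiting for the host, while virtgpu_virtio_get_uuid() sleeps on vgdev->resp_wq until the response callback moves uuid_state out of UUID_INITIALIZING. Exporting therefore never blocks on the host; only a get_uuid() caller does.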
drivers/gpu/drm/virtio/virtgpu_vq.c

@@ -1107,3 +1107,58 @@ void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
         memcpy(cur_p, &output->cursor, sizeof(output->cursor));
         virtio_gpu_queue_cursor(vgdev, vbuf);
 }
+
+static void virtio_gpu_cmd_resource_uuid_cb(struct virtio_gpu_device *vgdev,
+                                            struct virtio_gpu_vbuffer *vbuf)
+{
+        struct virtio_gpu_object *obj =
+                gem_to_virtio_gpu_obj(vbuf->objs->objs[0]);
+        struct virtio_gpu_resp_resource_uuid *resp =
+                (struct virtio_gpu_resp_resource_uuid *)vbuf->resp_buf;
+        uint32_t resp_type = le32_to_cpu(resp->hdr.type);
+
+        spin_lock(&vgdev->resource_export_lock);
+        WARN_ON(obj->uuid_state != UUID_INITIALIZING);
+
+        if (resp_type == VIRTIO_GPU_RESP_OK_RESOURCE_UUID &&
+            obj->uuid_state == UUID_INITIALIZING) {
+                memcpy(&obj->uuid.b, resp->uuid, sizeof(obj->uuid.b));
+                obj->uuid_state = UUID_INITIALIZED;
+        } else {
+                obj->uuid_state = UUID_INITIALIZATION_FAILED;
+        }
+        spin_unlock(&vgdev->resource_export_lock);
+
+        wake_up_all(&vgdev->resp_wq);
+}
+
+int
+virtio_gpu_cmd_resource_assign_uuid(struct virtio_gpu_device *vgdev,
+                                    struct virtio_gpu_object_array *objs)
+{
+        struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
+        struct virtio_gpu_resource_assign_uuid *cmd_p;
+        struct virtio_gpu_vbuffer *vbuf;
+        struct virtio_gpu_resp_resource_uuid *resp_buf;
+
+        resp_buf = kzalloc(sizeof(*resp_buf), GFP_KERNEL);
+        if (!resp_buf) {
+                spin_lock(&vgdev->resource_export_lock);
+                bo->uuid_state = UUID_INITIALIZATION_FAILED;
+                spin_unlock(&vgdev->resource_export_lock);
+                virtio_gpu_array_put_free(objs);
+                return -ENOMEM;
+        }
+
+        cmd_p = virtio_gpu_alloc_cmd_resp
+                (vgdev, virtio_gpu_cmd_resource_uuid_cb, &vbuf, sizeof(*cmd_p),
+                 sizeof(struct virtio_gpu_resp_resource_uuid), resp_buf);
+        memset(cmd_p, 0, sizeof(*cmd_p));
+
+        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_ASSIGN_UUID);
+        cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
+
+        vbuf->objs = objs;
+        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+        return 0;
+}
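
For reference, the wire format this command uses is defined by the companion uapi patch in this series (include/uapi/linux/virtio_gpu.h); it is not part of this diff, but the structures look like:

#define VIRTIO_GPU_F_RESOURCE_UUID 2

struct virtio_gpu_resource_assign_uuid {
        struct virtio_gpu_ctrl_hdr hdr;
        __le32 resource_id;
        __le32 padding;
};

struct virtio_gpu_resp_resource_uuid {
        struct virtio_gpu_ctrl_hdr hdr;
        __u8 uuid[16];
};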