Commit 2fe4ca9d authored by Gerd Hoffmann

drm/virtio: move mapping teardown to virtio_gpu_cleanup_object()

Stop sending DETACH_BACKING commands; the backing is detached anyway
when the resource is released via UNREF.  Handle the guest-side cleanup
in virtio_gpu_cleanup_object(), which is called once the host has
finished processing the UNREF command.
Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20200207074638.26386-4-kraxel@redhat.com
Reviewed-by: Chia-I Wu <olvaffe@gmail.com>
parent 1ed5f698
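
For context: after this change virtio_gpu_free_object() only queues the
UNREF command, and virtio_gpu_cleanup_object() runs from the UNREF
response callback, i.e. only after the host has confirmed it is done
with the resource.  A minimal sketch of such a callback, assuming the
vbuffer carries a pointer back to the object in a resp_cb_data field
(the callback below is illustrative and not part of this diff):

static void virtio_gpu_cmd_unref_cb(struct virtio_gpu_device *vgdev,
				    struct virtio_gpu_vbuffer *vbuf)
{
	/* assumed: the UNREF vbuffer carries a pointer back to the bo */
	struct virtio_gpu_object *bo = vbuf->resp_cb_data;

	/*
	 * The host has finished processing UNREF, so the guest-side
	 * teardown (dma_unmap_sg, sg_free_table, unpin) is now safe.
	 */
	virtio_gpu_cleanup_object(bo);
}

Deferring dma_unmap_sg() to this point is what makes dropping the
DETACH_BACKING round trip safe: the pages stay mapped for as long as
the host may still access them.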
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -282,8 +282,6 @@ void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
 int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
			     struct virtio_gpu_object *obj,
			     struct virtio_gpu_fence *fence);
-void virtio_gpu_object_detach(struct virtio_gpu_device *vgdev,
-			      struct virtio_gpu_object *obj);
 int virtio_gpu_attach_status_page(struct virtio_gpu_device *vgdev);
 int virtio_gpu_detach_status_page(struct virtio_gpu_device *vgdev);
 void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
--- a/drivers/gpu/drm/virtio/virtgpu_object.c
+++ b/drivers/gpu/drm/virtio/virtgpu_object.c
@@ -23,6 +23,7 @@
  * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  */
 
+#include <linux/dma-mapping.h>
 #include <linux/moduleparam.h>
 
 #include "virtgpu_drv.h"
@@ -65,6 +66,17 @@ void virtio_gpu_cleanup_object(struct virtio_gpu_object *bo)
 {
	struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;
 
+	if (bo->pages) {
+		if (bo->mapped) {
+			dma_unmap_sg(vgdev->vdev->dev.parent,
+				     bo->pages->sgl, bo->mapped,
+				     DMA_TO_DEVICE);
+			bo->mapped = 0;
+		}
+		sg_free_table(bo->pages);
+		bo->pages = NULL;
+		drm_gem_shmem_unpin(&bo->base.base);
+	}
	virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle);
	drm_gem_shmem_free_object(&bo->base.base);
 }
@@ -74,8 +86,6 @@ static void virtio_gpu_free_object(struct drm_gem_object *obj)
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
	struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;
 
-	if (bo->pages)
-		virtio_gpu_object_detach(vgdev, bo);
	if (bo->created) {
		virtio_gpu_cmd_unref_resource(vgdev, bo);
		/* completion handler calls virtio_gpu_cleanup_object() */
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -548,22 +548,6 @@ void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
 }
 
-static void virtio_gpu_cmd_resource_inval_backing(struct virtio_gpu_device *vgdev,
-						  uint32_t resource_id,
-						  struct virtio_gpu_fence *fence)
-{
-	struct virtio_gpu_resource_detach_backing *cmd_p;
-	struct virtio_gpu_vbuffer *vbuf;
-
-	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
-	memset(cmd_p, 0, sizeof(*cmd_p));
-
-	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING);
-	cmd_p->resource_id = cpu_to_le32(resource_id);
-
-	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
-}
-
 void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
				uint32_t scanout_id, uint32_t resource_id,
				uint32_t width, uint32_t height,
@@ -1158,36 +1142,6 @@ int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
	return 0;
 }
 
-void virtio_gpu_object_detach(struct virtio_gpu_device *vgdev,
-			      struct virtio_gpu_object *obj)
-{
-	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
-
-	if (WARN_ON_ONCE(!obj->pages))
-		return;
-
-	if (use_dma_api && obj->mapped) {
-		struct virtio_gpu_fence *fence = virtio_gpu_fence_alloc(vgdev);
-		/* detach backing and wait for the host process it ... */
-		virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle, fence);
-		dma_fence_wait(&fence->f, true);
-		dma_fence_put(&fence->f);
-
-		/* ... then tear down iommu mappings */
-		dma_unmap_sg(vgdev->vdev->dev.parent,
-			     obj->pages->sgl, obj->mapped,
-			     DMA_TO_DEVICE);
-		obj->mapped = 0;
-	} else {
-		virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle, NULL);
-	}
-
-	sg_free_table(obj->pages);
-	obj->pages = NULL;
-
-	drm_gem_shmem_unpin(&obj->base.base);
-}
-
 void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
			    struct virtio_gpu_output *output)
 {