Commit a950b989 authored by Zack Rusin

drm/vmwgfx: Do not drop the reference to the handle too soon

v3: Fix vmw_user_bo_lookup, which was also dropping the gem reference
before the kernel was done with the buffer, relying on userspace to do
the right thing. Same bug, different spot.

It is possible for userspace to predict the next buffer handle and
to destroy the buffer while it's still used by the kernel. Delay
dropping the internal reference on the buffers until the kernel is
done with them.

Instead of immediately dropping the gem reference in vmw_user_bo_lookup
and vmw_gem_object_create_with_handle, let the callers decide when they're
ready to give control back to userspace.
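
A minimal sketch of the resulting caller contract, assuming a caller that
already holds filp and handle (error paths trimmed; vbo and ret are
illustrative):

    struct vmw_buffer_object *vbo;
    int ret = vmw_user_bo_lookup(filp, handle, &vbo);
    if (ret != 0)
            return ret;

    /* ... kernel-side work; the lookup's ttm and gem references
     * both pin the buffer until the caller is finished with it ... */

    drm_gem_object_put(&vbo->base.base);    /* drop the gem reference */
    vmw_bo_unreference(&vbo);               /* drop the ttm reference */

(The gem put is done before vmw_bo_unreference here only because the
latter clears the pointer it is given.)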

Also fixes the second usage of vmw_gem_object_create_with_handle in
vmwgfx_surface.c, which wasn't grabbing an explicit reference to the
gem object, so userspace could have destroyed the buffer backing the
owning surface at any point.
Signed-off-by: Zack Rusin <zackr@vmware.com>
Fixes: 8afa13a0 ("drm/vmwgfx: Implement DRIVER_GEM")
Reviewed-by: Martin Krastev <krastevm@vmware.com>
Reviewed-by: Maaz Mombasawala <mombasawalam@vmware.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20230211050514.2431155-1-zack@kde.org
(cherry picked from commit 9ef8d83e)
Cc: <stable@vger.kernel.org> # v5.17+
parent 1a689792
@@ -598,6 +598,7 @@ static int vmw_user_bo_synccpu_release(struct drm_file *filp,
                 ttm_bo_put(&vmw_bo->base);
         }
 
+        drm_gem_object_put(&vmw_bo->base.base);
         return ret;
 }
@@ -638,6 +639,7 @@ int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
                 ret = vmw_user_bo_synccpu_grab(vbo, arg->flags);
                 vmw_bo_unreference(&vbo);
+                drm_gem_object_put(&vbo->base.base);
                 if (unlikely(ret != 0)) {
                         if (ret == -ERESTARTSYS || ret == -EBUSY)
                                 return -EBUSY;
@@ -695,7 +697,7 @@ int vmw_bo_unref_ioctl(struct drm_device *dev, void *data,
  * struct vmw_buffer_object should be placed.
  * Return: Zero on success, Negative error code on error.
  *
- * The vmw buffer object pointer will be refcounted.
+ * The vmw buffer object pointer will be refcounted (both ttm and gem)
  */
 int vmw_user_bo_lookup(struct drm_file *filp,
                        uint32_t handle,
@@ -712,7 +714,6 @@ int vmw_user_bo_lookup(struct drm_file *filp,
         *out = gem_to_vmw_bo(gobj);
         ttm_bo_get(&(*out)->base);
-        drm_gem_object_put(gobj);
 
         return 0;
 }
@@ -793,7 +794,8 @@ int vmw_dumb_create(struct drm_file *file_priv,
         ret = vmw_gem_object_create_with_handle(dev_priv, file_priv,
                                                 args->size, &args->handle,
                                                 &vbo);
-
+        /* drop reference from allocate - handle holds it now */
+        drm_gem_object_put(&vbo->base.base);
         return ret;
 }
...
@@ -1160,6 +1160,7 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
         }
 
         ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, true, false);
         ttm_bo_put(&vmw_bo->base);
+        drm_gem_object_put(&vmw_bo->base.base);
         if (unlikely(ret != 0))
                 return ret;
@@ -1214,6 +1215,7 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
         }
 
         ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, false, false);
         ttm_bo_put(&vmw_bo->base);
+        drm_gem_object_put(&vmw_bo->base.base);
         if (unlikely(ret != 0))
                 return ret;
...
@@ -152,8 +152,6 @@ int vmw_gem_object_create_with_handle(struct vmw_private *dev_priv,
         (*p_vbo)->base.base.funcs = &vmw_gem_object_funcs;
 
         ret = drm_gem_handle_create(filp, &(*p_vbo)->base.base, handle);
-        /* drop reference from allocate - handle holds it now */
-        drm_gem_object_put(&(*p_vbo)->base.base);
 out_no_bo:
         return ret;
 }
@@ -180,6 +178,8 @@ int vmw_gem_object_create_ioctl(struct drm_device *dev, void *data,
         rep->map_handle = drm_vma_node_offset_addr(&vbo->base.base.vma_node);
         rep->cur_gmr_id = handle;
         rep->cur_gmr_offset = 0;
+        /* drop reference from allocate - handle holds it now */
+        drm_gem_object_put(&vbo->base.base);
 out_no_bo:
         return ret;
 }
...
@@ -1815,8 +1815,10 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
 err_out:
         /* vmw_user_lookup_handle takes one ref so does new_fb */
-        if (bo)
+        if (bo) {
                 vmw_bo_unreference(&bo);
+                drm_gem_object_put(&bo->base.base);
+        }
         if (surface)
                 vmw_surface_unreference(&surface);
...
@@ -458,6 +458,7 @@ int vmw_overlay_ioctl(struct drm_device *dev, void *data,
         ret = vmw_overlay_update_stream(dev_priv, buf, arg, true);
 
         vmw_bo_unreference(&buf);
+        drm_gem_object_put(&buf->base.base);
 out_unlock:
         mutex_unlock(&overlay->mutex);
...
@@ -807,6 +807,7 @@ static int vmw_shader_define(struct drm_device *dev, struct drm_file *file_priv,
                                      num_output_sig, tfile, shader_handle);
 out_bad_arg:
         vmw_bo_unreference(&buffer);
+        drm_gem_object_put(&buffer->base.base);
         return ret;
 }
...
@@ -683,7 +683,7 @@ static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
             container_of(base, struct vmw_user_surface, prime.base);
         struct vmw_resource *res = &user_srf->srf.res;
 
-        if (base->shareable && res && res->backup)
+        if (res && res->backup)
                 drm_gem_object_put(&res->backup->base.base);
 
         *p_base = NULL;
@@ -864,7 +864,11 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
                         goto out_unlock;
                 }
                 vmw_bo_reference(res->backup);
-                drm_gem_object_get(&res->backup->base.base);
+                /*
+                 * We don't expose the handle to the userspace and surface
+                 * already holds a gem reference
+                 */
+                drm_gem_handle_delete(file_priv, backup_handle);
         }
 
         tmp = vmw_resource_reference(&srf->res);
@@ -1568,8 +1572,6 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
                         drm_vma_node_offset_addr(&res->backup->base.base.vma_node);
                 rep->buffer_size = res->backup->base.base.size;
                 rep->buffer_handle = backup_handle;
-                if (user_srf->prime.base.shareable)
-                        drm_gem_object_get(&res->backup->base.base);
         } else {
                 rep->buffer_map_handle = 0;
                 rep->buffer_size = 0;
...