Commit 54c12bc3 authored by Thomas Hellstrom

drm/vmwgfx: Fix up user_dmabuf refcounting

If user space calls unreference on a user_dmabuf it will typically
kill the struct ttm_base_object member which is responsible for the
user-space visibility. However the dmabuf part may still be alive and
refcounted. In some situations, like for shared guest-backed surface
referencing/opening, the driver may try to reference the
struct ttm_base_object member again, causing an immediate kernel warning
and a later kernel NULL pointer dereference.

Fix this by always maintaining a reference on the struct
ttm_base_object member, in situations where it might subsequently be
referenced.

Cc: <stable@vger.kernel.org>
Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Reviewed-by: Brian Paul <brianp@vmware.com>
Reviewed-by: Sinclair Yeh <syeh@vmware.com>
parent 9fbcc7c0
...@@ -631,7 +631,8 @@ extern int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv, ...@@ -631,7 +631,8 @@ extern int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
uint32_t size, uint32_t size,
bool shareable, bool shareable,
uint32_t *handle, uint32_t *handle,
struct vmw_dma_buffer **p_dma_buf); struct vmw_dma_buffer **p_dma_buf,
struct ttm_base_object **p_base);
extern int vmw_user_dmabuf_reference(struct ttm_object_file *tfile, extern int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
struct vmw_dma_buffer *dma_buf, struct vmw_dma_buffer *dma_buf,
uint32_t *handle); uint32_t *handle);
...@@ -645,7 +646,8 @@ extern uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo, ...@@ -645,7 +646,8 @@ extern uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
uint32_t cur_validate_node); uint32_t cur_validate_node);
extern void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo); extern void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo);
extern int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile, extern int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
uint32_t id, struct vmw_dma_buffer **out); uint32_t id, struct vmw_dma_buffer **out,
struct ttm_base_object **base);
extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data, extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv); struct drm_file *file_priv);
extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data, extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
......
...@@ -1236,7 +1236,8 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv, ...@@ -1236,7 +1236,8 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
struct vmw_relocation *reloc; struct vmw_relocation *reloc;
int ret; int ret;
ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo); ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo,
NULL);
if (unlikely(ret != 0)) { if (unlikely(ret != 0)) {
DRM_ERROR("Could not find or use MOB buffer.\n"); DRM_ERROR("Could not find or use MOB buffer.\n");
ret = -EINVAL; ret = -EINVAL;
...@@ -1296,7 +1297,8 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv, ...@@ -1296,7 +1297,8 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
struct vmw_relocation *reloc; struct vmw_relocation *reloc;
int ret; int ret;
ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo); ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo,
NULL);
if (unlikely(ret != 0)) { if (unlikely(ret != 0)) {
DRM_ERROR("Could not find or use GMR region.\n"); DRM_ERROR("Could not find or use GMR region.\n");
ret = -EINVAL; ret = -EINVAL;
......
...@@ -484,7 +484,7 @@ int vmw_overlay_ioctl(struct drm_device *dev, void *data, ...@@ -484,7 +484,7 @@ int vmw_overlay_ioctl(struct drm_device *dev, void *data,
goto out_unlock; goto out_unlock;
} }
ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &buf); ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &buf, NULL);
if (ret) if (ret)
goto out_unlock; goto out_unlock;
......
...@@ -354,7 +354,7 @@ int vmw_user_lookup_handle(struct vmw_private *dev_priv, ...@@ -354,7 +354,7 @@ int vmw_user_lookup_handle(struct vmw_private *dev_priv,
} }
*out_surf = NULL; *out_surf = NULL;
ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf); ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf, NULL);
return ret; return ret;
} }
...@@ -481,7 +481,8 @@ int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv, ...@@ -481,7 +481,8 @@ int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
uint32_t size, uint32_t size,
bool shareable, bool shareable,
uint32_t *handle, uint32_t *handle,
struct vmw_dma_buffer **p_dma_buf) struct vmw_dma_buffer **p_dma_buf,
struct ttm_base_object **p_base)
{ {
struct vmw_user_dma_buffer *user_bo; struct vmw_user_dma_buffer *user_bo;
struct ttm_buffer_object *tmp; struct ttm_buffer_object *tmp;
...@@ -515,6 +516,10 @@ int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv, ...@@ -515,6 +516,10 @@ int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
} }
*p_dma_buf = &user_bo->dma; *p_dma_buf = &user_bo->dma;
if (p_base) {
*p_base = &user_bo->prime.base;
kref_get(&(*p_base)->refcount);
}
*handle = user_bo->prime.base.hash.key; *handle = user_bo->prime.base.hash.key;
out_no_base_object: out_no_base_object:
...@@ -631,6 +636,7 @@ int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data, ...@@ -631,6 +636,7 @@ int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
struct vmw_dma_buffer *dma_buf; struct vmw_dma_buffer *dma_buf;
struct vmw_user_dma_buffer *user_bo; struct vmw_user_dma_buffer *user_bo;
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
struct ttm_base_object *buffer_base;
int ret; int ret;
if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0 if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0
...@@ -643,7 +649,8 @@ int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data, ...@@ -643,7 +649,8 @@ int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
switch (arg->op) { switch (arg->op) {
case drm_vmw_synccpu_grab: case drm_vmw_synccpu_grab:
ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &dma_buf); ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &dma_buf,
&buffer_base);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
return ret; return ret;
...@@ -651,6 +658,7 @@ int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data, ...@@ -651,6 +658,7 @@ int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
dma); dma);
ret = vmw_user_dmabuf_synccpu_grab(user_bo, tfile, arg->flags); ret = vmw_user_dmabuf_synccpu_grab(user_bo, tfile, arg->flags);
vmw_dmabuf_unreference(&dma_buf); vmw_dmabuf_unreference(&dma_buf);
ttm_base_object_unref(&buffer_base);
if (unlikely(ret != 0 && ret != -ERESTARTSYS && if (unlikely(ret != 0 && ret != -ERESTARTSYS &&
ret != -EBUSY)) { ret != -EBUSY)) {
DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n", DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
...@@ -692,7 +700,8 @@ int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data, ...@@ -692,7 +700,8 @@ int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
return ret; return ret;
ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile, ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
req->size, false, &handle, &dma_buf); req->size, false, &handle, &dma_buf,
NULL);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
goto out_no_dmabuf; goto out_no_dmabuf;
...@@ -721,7 +730,8 @@ int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data, ...@@ -721,7 +730,8 @@ int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
} }
int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile, int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
uint32_t handle, struct vmw_dma_buffer **out) uint32_t handle, struct vmw_dma_buffer **out,
struct ttm_base_object **p_base)
{ {
struct vmw_user_dma_buffer *vmw_user_bo; struct vmw_user_dma_buffer *vmw_user_bo;
struct ttm_base_object *base; struct ttm_base_object *base;
...@@ -743,6 +753,9 @@ int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile, ...@@ -743,6 +753,9 @@ int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, vmw_user_bo = container_of(base, struct vmw_user_dma_buffer,
prime.base); prime.base);
(void)ttm_bo_reference(&vmw_user_bo->dma.base); (void)ttm_bo_reference(&vmw_user_bo->dma.base);
if (p_base)
*p_base = base;
else
ttm_base_object_unref(&base); ttm_base_object_unref(&base);
*out = &vmw_user_bo->dma; *out = &vmw_user_bo->dma;
...@@ -1004,7 +1017,7 @@ int vmw_dumb_create(struct drm_file *file_priv, ...@@ -1004,7 +1017,7 @@ int vmw_dumb_create(struct drm_file *file_priv,
ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile, ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
args->size, false, &args->handle, args->size, false, &args->handle,
&dma_buf); &dma_buf, NULL);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
goto out_no_dmabuf; goto out_no_dmabuf;
...@@ -1032,7 +1045,7 @@ int vmw_dumb_map_offset(struct drm_file *file_priv, ...@@ -1032,7 +1045,7 @@ int vmw_dumb_map_offset(struct drm_file *file_priv,
struct vmw_dma_buffer *out_buf; struct vmw_dma_buffer *out_buf;
int ret; int ret;
ret = vmw_user_dmabuf_lookup(tfile, handle, &out_buf); ret = vmw_user_dmabuf_lookup(tfile, handle, &out_buf, NULL);
if (ret != 0) if (ret != 0)
return -EINVAL; return -EINVAL;
......
...@@ -855,7 +855,7 @@ static int vmw_shader_define(struct drm_device *dev, struct drm_file *file_priv, ...@@ -855,7 +855,7 @@ static int vmw_shader_define(struct drm_device *dev, struct drm_file *file_priv,
if (buffer_handle != SVGA3D_INVALID_ID) { if (buffer_handle != SVGA3D_INVALID_ID) {
ret = vmw_user_dmabuf_lookup(tfile, buffer_handle, ret = vmw_user_dmabuf_lookup(tfile, buffer_handle,
&buffer); &buffer, NULL);
if (unlikely(ret != 0)) { if (unlikely(ret != 0)) {
DRM_ERROR("Could not find buffer for shader " DRM_ERROR("Could not find buffer for shader "
"creation.\n"); "creation.\n");
......
...@@ -46,6 +46,7 @@ struct vmw_user_surface { ...@@ -46,6 +46,7 @@ struct vmw_user_surface {
struct vmw_surface srf; struct vmw_surface srf;
uint32_t size; uint32_t size;
struct drm_master *master; struct drm_master *master;
struct ttm_base_object *backup_base;
}; };
/** /**
...@@ -656,6 +657,7 @@ static void vmw_user_surface_base_release(struct ttm_base_object **p_base) ...@@ -656,6 +657,7 @@ static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
struct vmw_resource *res = &user_srf->srf.res; struct vmw_resource *res = &user_srf->srf.res;
*p_base = NULL; *p_base = NULL;
ttm_base_object_unref(&user_srf->backup_base);
vmw_resource_unreference(&res); vmw_resource_unreference(&res);
} }
...@@ -851,7 +853,8 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data, ...@@ -851,7 +853,8 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
res->backup_size, res->backup_size,
true, true,
&backup_handle, &backup_handle,
&res->backup); &res->backup,
&user_srf->backup_base);
if (unlikely(ret != 0)) { if (unlikely(ret != 0)) {
vmw_resource_unreference(&res); vmw_resource_unreference(&res);
goto out_unlock; goto out_unlock;
...@@ -1321,7 +1324,8 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data, ...@@ -1321,7 +1324,8 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
if (req->buffer_handle != SVGA3D_INVALID_ID) { if (req->buffer_handle != SVGA3D_INVALID_ID) {
ret = vmw_user_dmabuf_lookup(tfile, req->buffer_handle, ret = vmw_user_dmabuf_lookup(tfile, req->buffer_handle,
&res->backup); &res->backup,
&user_srf->backup_base);
if (ret == 0 && res->backup->base.num_pages * PAGE_SIZE < if (ret == 0 && res->backup->base.num_pages * PAGE_SIZE <
res->backup_size) { res->backup_size) {
DRM_ERROR("Surface backup buffer is too small.\n"); DRM_ERROR("Surface backup buffer is too small.\n");
...@@ -1335,7 +1339,8 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data, ...@@ -1335,7 +1339,8 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
req->drm_surface_flags & req->drm_surface_flags &
drm_vmw_surface_flag_shareable, drm_vmw_surface_flag_shareable,
&backup_handle, &backup_handle,
&res->backup); &res->backup,
&user_srf->backup_base);
if (unlikely(ret != 0)) { if (unlikely(ret != 0)) {
vmw_resource_unreference(&res); vmw_resource_unreference(&res);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment