Commit 9c6c892e authored by Thomas Hellstrom's avatar Thomas Hellstrom Committed by Greg Kroah-Hartman

drm/vmwgfx: Fix a destroy-while-held mutex problem.

commit 73a88250 upstream.

When validating legacy surfaces, the backup bo might be destroyed at
surface validate time. However, the kms resource validation code may have
the bo reserved, so we will destroy a locked mutex. While there shouldn't
be any other users of that mutex when it is destroyed, it causes a lock
leak and thus throws a lockdep error.

Fix this by having the kms resource validation code hold a reference to
the bo while we have it reserved. We do this by introducing a validation
context which might come in handy when the kms code is extended to validate
multiple resources or buffers.

Cc: <stable@vger.kernel.org>
Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Reviewed-by: Brian Paul <brianp@vmware.com>
Reviewed-by: Sinclair Yeh <syeh@vmware.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 0972e0f0
...@@ -27,7 +27,6 @@ ...@@ -27,7 +27,6 @@
#include "vmwgfx_kms.h" #include "vmwgfx_kms.h"
/* Might need a hrtimer here? */ /* Might need a hrtimer here? */
#define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1) #define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1)
...@@ -1910,9 +1909,12 @@ void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv, ...@@ -1910,9 +1909,12 @@ void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv,
* Helper to be used if an error forces the caller to undo the actions of * Helper to be used if an error forces the caller to undo the actions of
* vmw_kms_helper_resource_prepare. * vmw_kms_helper_resource_prepare.
*/ */
void vmw_kms_helper_resource_revert(struct vmw_resource *res) void vmw_kms_helper_resource_revert(struct vmw_validation_ctx *ctx)
{ {
vmw_kms_helper_buffer_revert(res->backup); struct vmw_resource *res = ctx->res;
vmw_kms_helper_buffer_revert(ctx->buf);
vmw_dmabuf_unreference(&ctx->buf);
vmw_resource_unreserve(res, false, NULL, 0); vmw_resource_unreserve(res, false, NULL, 0);
mutex_unlock(&res->dev_priv->cmdbuf_mutex); mutex_unlock(&res->dev_priv->cmdbuf_mutex);
} }
...@@ -1929,10 +1931,14 @@ void vmw_kms_helper_resource_revert(struct vmw_resource *res) ...@@ -1929,10 +1931,14 @@ void vmw_kms_helper_resource_revert(struct vmw_resource *res)
* interrupted by a signal. * interrupted by a signal.
*/ */
int vmw_kms_helper_resource_prepare(struct vmw_resource *res, int vmw_kms_helper_resource_prepare(struct vmw_resource *res,
bool interruptible) bool interruptible,
struct vmw_validation_ctx *ctx)
{ {
int ret = 0; int ret = 0;
ctx->buf = NULL;
ctx->res = res;
if (interruptible) if (interruptible)
ret = mutex_lock_interruptible(&res->dev_priv->cmdbuf_mutex); ret = mutex_lock_interruptible(&res->dev_priv->cmdbuf_mutex);
else else
...@@ -1951,6 +1957,8 @@ int vmw_kms_helper_resource_prepare(struct vmw_resource *res, ...@@ -1951,6 +1957,8 @@ int vmw_kms_helper_resource_prepare(struct vmw_resource *res,
res->dev_priv->has_mob); res->dev_priv->has_mob);
if (ret) if (ret)
goto out_unreserve; goto out_unreserve;
ctx->buf = vmw_dmabuf_reference(res->backup);
} }
ret = vmw_resource_validate(res); ret = vmw_resource_validate(res);
if (ret) if (ret)
...@@ -1958,7 +1966,7 @@ int vmw_kms_helper_resource_prepare(struct vmw_resource *res, ...@@ -1958,7 +1966,7 @@ int vmw_kms_helper_resource_prepare(struct vmw_resource *res,
return 0; return 0;
out_revert: out_revert:
vmw_kms_helper_buffer_revert(res->backup); vmw_kms_helper_buffer_revert(ctx->buf);
out_unreserve: out_unreserve:
vmw_resource_unreserve(res, false, NULL, 0); vmw_resource_unreserve(res, false, NULL, 0);
out_unlock: out_unlock:
...@@ -1974,11 +1982,13 @@ int vmw_kms_helper_resource_prepare(struct vmw_resource *res, ...@@ -1974,11 +1982,13 @@ int vmw_kms_helper_resource_prepare(struct vmw_resource *res,
* @out_fence: Optional pointer to a fence pointer. If non-NULL, a * @out_fence: Optional pointer to a fence pointer. If non-NULL, a
* ref-counted fence pointer is returned here. * ref-counted fence pointer is returned here.
*/ */
void vmw_kms_helper_resource_finish(struct vmw_resource *res, void vmw_kms_helper_resource_finish(struct vmw_validation_ctx *ctx,
struct vmw_fence_obj **out_fence) struct vmw_fence_obj **out_fence)
{ {
if (res->backup || out_fence) struct vmw_resource *res = ctx->res;
vmw_kms_helper_buffer_finish(res->dev_priv, NULL, res->backup,
if (ctx->buf || out_fence)
vmw_kms_helper_buffer_finish(res->dev_priv, NULL, ctx->buf,
out_fence, NULL); out_fence, NULL);
vmw_resource_unreserve(res, false, NULL, 0); vmw_resource_unreserve(res, false, NULL, 0);
......
...@@ -180,6 +180,11 @@ struct vmw_display_unit { ...@@ -180,6 +180,11 @@ struct vmw_display_unit {
bool is_implicit; bool is_implicit;
}; };
/*
 * KMS validation context: pairs a reserved resource with a reference to
 * its backup buffer so the buffer cannot be destroyed (and its reservation
 * mutex freed) while the KMS code still holds it reserved.
 */
struct vmw_validation_ctx {
	struct vmw_resource *res;	/* Resource being validated */
	struct vmw_dma_buffer *buf;	/* Referenced backup buffer, or NULL if the
					 * resource has no backup at prepare time */
};
#define vmw_crtc_to_du(x) \ #define vmw_crtc_to_du(x) \
container_of(x, struct vmw_display_unit, crtc) container_of(x, struct vmw_display_unit, crtc)
#define vmw_connector_to_du(x) \ #define vmw_connector_to_du(x) \
...@@ -230,9 +235,10 @@ void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv, ...@@ -230,9 +235,10 @@ void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv,
struct drm_vmw_fence_rep __user * struct drm_vmw_fence_rep __user *
user_fence_rep); user_fence_rep);
int vmw_kms_helper_resource_prepare(struct vmw_resource *res, int vmw_kms_helper_resource_prepare(struct vmw_resource *res,
bool interruptible); bool interruptible,
void vmw_kms_helper_resource_revert(struct vmw_resource *res); struct vmw_validation_ctx *ctx);
void vmw_kms_helper_resource_finish(struct vmw_resource *res, void vmw_kms_helper_resource_revert(struct vmw_validation_ctx *ctx);
void vmw_kms_helper_resource_finish(struct vmw_validation_ctx *ctx,
struct vmw_fence_obj **out_fence); struct vmw_fence_obj **out_fence);
int vmw_kms_readback(struct vmw_private *dev_priv, int vmw_kms_readback(struct vmw_private *dev_priv,
struct drm_file *file_priv, struct drm_file *file_priv,
......
...@@ -841,12 +841,13 @@ int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv, ...@@ -841,12 +841,13 @@ int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv,
struct vmw_framebuffer_surface *vfbs = struct vmw_framebuffer_surface *vfbs =
container_of(framebuffer, typeof(*vfbs), base); container_of(framebuffer, typeof(*vfbs), base);
struct vmw_kms_sou_surface_dirty sdirty; struct vmw_kms_sou_surface_dirty sdirty;
struct vmw_validation_ctx ctx;
int ret; int ret;
if (!srf) if (!srf)
srf = &vfbs->surface->res; srf = &vfbs->surface->res;
ret = vmw_kms_helper_resource_prepare(srf, true); ret = vmw_kms_helper_resource_prepare(srf, true, &ctx);
if (ret) if (ret)
return ret; return ret;
...@@ -865,7 +866,7 @@ int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv, ...@@ -865,7 +866,7 @@ int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv,
ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, vclips, ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, vclips,
dest_x, dest_y, num_clips, inc, dest_x, dest_y, num_clips, inc,
&sdirty.base); &sdirty.base);
vmw_kms_helper_resource_finish(srf, out_fence); vmw_kms_helper_resource_finish(&ctx, out_fence);
return ret; return ret;
} }
......
...@@ -1003,12 +1003,13 @@ int vmw_kms_stdu_surface_dirty(struct vmw_private *dev_priv, ...@@ -1003,12 +1003,13 @@ int vmw_kms_stdu_surface_dirty(struct vmw_private *dev_priv,
struct vmw_framebuffer_surface *vfbs = struct vmw_framebuffer_surface *vfbs =
container_of(framebuffer, typeof(*vfbs), base); container_of(framebuffer, typeof(*vfbs), base);
struct vmw_stdu_dirty sdirty; struct vmw_stdu_dirty sdirty;
struct vmw_validation_ctx ctx;
int ret; int ret;
if (!srf) if (!srf)
srf = &vfbs->surface->res; srf = &vfbs->surface->res;
ret = vmw_kms_helper_resource_prepare(srf, true); ret = vmw_kms_helper_resource_prepare(srf, true, &ctx);
if (ret) if (ret)
return ret; return ret;
...@@ -1031,7 +1032,7 @@ int vmw_kms_stdu_surface_dirty(struct vmw_private *dev_priv, ...@@ -1031,7 +1032,7 @@ int vmw_kms_stdu_surface_dirty(struct vmw_private *dev_priv,
dest_x, dest_y, num_clips, inc, dest_x, dest_y, num_clips, inc,
&sdirty.base); &sdirty.base);
out_finish: out_finish:
vmw_kms_helper_resource_finish(srf, out_fence); vmw_kms_helper_resource_finish(&ctx, out_fence);
return ret; return ret;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment