Commit d76ce03e authored by Thomas Hellstrom

drm/vmwgfx: Replace unconditional mutex unlocked warnings with lockdep counterpart

Replace instances of WARN_ON[_ONCE](!mutex_is_locked()) with
lockdep_assert_held[_once](). This makes sure the checking process actually
holds the mutex, and it also removes the checks from release builds.

Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Reviewed-by: Sinclair Yeh <syeh@vmware.com>
Reviewed-by: Deepak Rawat <drawat@vmware.com>
parent cc1e3b79
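
For context, a minimal sketch of the two patterns in kernel style (not part of the patch; example_mutex and example_locked_op() are hypothetical names used only for illustration):

/* Hypothetical example, not taken from the vmwgfx sources. */
#include <linux/bug.h>
#include <linux/lockdep.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(example_mutex);

static void example_locked_op(void)
{
	/*
	 * Old pattern: mutex_is_locked() only reports that *some* task
	 * holds example_mutex, and the WARN_ON() is typically compiled
	 * into release builds as well.
	 */
	WARN_ON(!mutex_is_locked(&example_mutex));

	/*
	 * New pattern: with CONFIG_LOCKDEP=y this warns unless the
	 * *current* task holds example_mutex; with lockdep disabled
	 * (the usual release configuration) it compiles to nothing.
	 */
	lockdep_assert_held(&example_mutex);
}

The hunks below use the lockdep_assert_held_once() variant, which reports at most one violation instead of warning on every call.
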
@@ -660,7 +660,7 @@ static void __vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man)
 {
 	struct vmw_cmdbuf_header *cur = man->cur;
 
-	WARN_ON(!mutex_is_locked(&man->cur_mutex));
+	lockdep_assert_held_once(&man->cur_mutex);
 
 	if (!cur)
 		return;
@@ -1045,7 +1045,7 @@ static void vmw_cmdbuf_commit_cur(struct vmw_cmdbuf_man *man,
 {
 	struct vmw_cmdbuf_header *cur = man->cur;
 
-	WARN_ON(!mutex_is_locked(&man->cur_mutex));
+	lockdep_assert_held_once(&man->cur_mutex);
 
 	WARN_ON(size > cur->reserved);
 	man->cur_pos += size;
...
@@ -562,7 +562,7 @@ void vmw_dx_shader_cotable_list_scrub(struct vmw_private *dev_priv,
 {
 	struct vmw_dx_shader *entry, *next;
 
-	WARN_ON_ONCE(!mutex_is_locked(&dev_priv->binding_mutex));
+	lockdep_assert_held_once(&dev_priv->binding_mutex);
 
 	list_for_each_entry_safe(entry, next, list, cotable_head) {
 		WARN_ON(vmw_dx_shader_scrub(&entry->res));
...
@@ -208,7 +208,7 @@ static int vmw_view_destroy(struct vmw_resource *res)
 		union vmw_view_destroy body;
 	} *cmd;
 
-	WARN_ON_ONCE(!mutex_is_locked(&dev_priv->binding_mutex));
+	lockdep_assert_held_once(&dev_priv->binding_mutex);
 	vmw_binding_res_list_scrub(&res->binding_head);
 
 	if (!view->committed || res->id == -1)
@@ -439,7 +439,7 @@ void vmw_view_cotable_list_destroy(struct vmw_private *dev_priv,
 {
 	struct vmw_view *entry, *next;
 
-	WARN_ON_ONCE(!mutex_is_locked(&dev_priv->binding_mutex));
+	lockdep_assert_held_once(&dev_priv->binding_mutex);
 
 	list_for_each_entry_safe(entry, next, list, cotable_head)
 		WARN_ON(vmw_view_destroy(&entry->res));
@@ -459,7 +459,7 @@ void vmw_view_surface_list_destroy(struct vmw_private *dev_priv,
 {
 	struct vmw_view *entry, *next;
 
-	WARN_ON_ONCE(!mutex_is_locked(&dev_priv->binding_mutex));
+	lockdep_assert_held_once(&dev_priv->binding_mutex);
 
 	list_for_each_entry_safe(entry, next, list, srf_head)
 		WARN_ON(vmw_view_destroy(&entry->res));
...