Commit 952f89f0 authored by Chris Wilson

drm/i915/gvt: Wean off struct_mutex

Use the local vgpu_lock while preparing workloads to avoid taking the
obsolete i915->drm.struct_mutex.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Zhenyu Wang <zhenyuw@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191016183902.13614-1-chris@chris-wilson.co.uk
parent 0dc3c562
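
The locking pattern in this commit is easier to see outside the kernel. Below is a minimal userspace C sketch, not part of the commit, of the same idea: workload preparation asserts and takes a per-instance vgpu_lock instead of one device-global mutex. All names here (struct vgpu, scan_and_shadow_workload, dispatch_workload) are hypothetical stand-ins, and the manually tracked owner field is only a crude analogue of what the kernel's lockdep_assert_held() verifies.

/*
 * Illustrative sketch only, not kernel code: models the commit's move from a
 * device-global mutex to a per-instance lock around workload preparation.
 * pthread_mutex_t stands in for struct mutex; the owner field is a simplified
 * analogue of lockdep's held-lock check. Build with: cc sketch.c -lpthread
 */
#include <assert.h>
#include <pthread.h>
#include <stdio.h>

struct vgpu {
	pthread_mutex_t vgpu_lock;   /* per-instance lock, like vgpu->vgpu_lock */
	pthread_t owner;             /* set while vgpu_lock is held (sketch only) */
	int shadow_ready;
};

/* Rough analogue of lockdep_assert_held(&vgpu->vgpu_lock). */
static void assert_vgpu_lock_held(struct vgpu *vgpu)
{
	assert(pthread_equal(vgpu->owner, pthread_self()));
}

/* Like intel_gvt_scan_and_shadow_workload(): caller must hold vgpu_lock. */
static int scan_and_shadow_workload(struct vgpu *vgpu)
{
	assert_vgpu_lock_held(vgpu);
	if (vgpu->shadow_ready)
		return 0;
	vgpu->shadow_ready = 1;
	return 0;
}

static int dispatch_workload(struct vgpu *vgpu)
{
	int ret;

	/* Only the per-instance lock is taken; no global lock is required. */
	pthread_mutex_lock(&vgpu->vgpu_lock);
	vgpu->owner = pthread_self();
	ret = scan_and_shadow_workload(vgpu);
	pthread_mutex_unlock(&vgpu->vgpu_lock);
	return ret;
}

int main(void)
{
	struct vgpu vgpu = { .vgpu_lock = PTHREAD_MUTEX_INITIALIZER };

	printf("dispatch: %d\n", dispatch_workload(&vgpu));
	return 0;
}

Because each vgpu carries its own lock, two vGPUs can prepare workloads concurrently without contending on a shared mutex, which is the point of weaning this path off struct_mutex.
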
@@ -415,10 +415,9 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
 {
 	struct intel_vgpu *vgpu = workload->vgpu;
 	struct intel_vgpu_submission *s = &vgpu->submission;
-	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
 	int ret;
 
-	lockdep_assert_held(&dev_priv->drm.struct_mutex);
+	lockdep_assert_held(&vgpu->vgpu_lock);
 
 	if (workload->shadow)
 		return 0;
@@ -580,8 +579,6 @@ static void update_vreg_in_ctx(struct intel_vgpu_workload *workload)
 
 static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
 {
-	struct intel_vgpu *vgpu = workload->vgpu;
-	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
 	struct intel_vgpu_shadow_bb *bb, *pos;
 
 	if (list_empty(&workload->shadow_bb))
@@ -590,8 +587,6 @@ static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
 	bb = list_first_entry(&workload->shadow_bb,
 			      struct intel_vgpu_shadow_bb, list);
 
-	mutex_lock(&dev_priv->drm.struct_mutex);
-
 	list_for_each_entry_safe(bb, pos, &workload->shadow_bb, list) {
 		if (bb->obj) {
 			if (bb->accessing)
@@ -609,8 +604,6 @@ static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
 		list_del(&bb->list);
 		kfree(bb);
 	}
-
-	mutex_unlock(&dev_priv->drm.struct_mutex);
 }
 
 static int prepare_workload(struct intel_vgpu_workload *workload)
@@ -685,7 +678,6 @@ static int prepare_workload(struct intel_vgpu_workload *workload)
 static int dispatch_workload(struct intel_vgpu_workload *workload)
 {
 	struct intel_vgpu *vgpu = workload->vgpu;
-	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
 	struct i915_request *rq;
 	int ring_id = workload->ring_id;
 	int ret;
@@ -694,7 +686,6 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
 		ring_id, workload);
 
 	mutex_lock(&vgpu->vgpu_lock);
-	mutex_lock(&dev_priv->drm.struct_mutex);
 
 	ret = intel_gvt_workload_req_alloc(workload);
 	if (ret)
@@ -729,7 +720,6 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
 err_req:
 	if (ret)
 		workload->status = ret;
-	mutex_unlock(&dev_priv->drm.struct_mutex);
 	mutex_unlock(&vgpu->vgpu_lock);
 	return ret;
 }
@@ -1594,9 +1584,9 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
 	 */
 	if (list_empty(workload_q_head(vgpu, ring_id))) {
 		intel_runtime_pm_get(&dev_priv->runtime_pm);
-		mutex_lock(&dev_priv->drm.struct_mutex);
+		mutex_lock(&vgpu->vgpu_lock);
 		ret = intel_gvt_scan_and_shadow_workload(workload);
-		mutex_unlock(&dev_priv->drm.struct_mutex);
+		mutex_unlock(&vgpu->vgpu_lock);
 		intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
 	}
 
...