Commit 1bc25851 authored by Changbin Du, committed by Zhenyu Wang

drm/i915/gvt: Refine the intel_vgpu_mm reference management

If an object is managed by a reference count, then its life cycle
must follow the reference count operations. Meanwhile, rename the
operation functions to the generic names *get* and *put*.
Signed-off-by: Changbin Du <changbin.du@intel.com>
Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
parent ede9d0cf
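
For context, the hunks below replace the ad-hoc destroy/reference/unreference helpers with the kernel's generic kref get/put idiom. The following is a minimal, hypothetical sketch of that pattern; the names mirror the patch, but the struct body, the release logic, and the example_create_mm() helper are simplified placeholders, not the real gtt.h/gtt.c code:

/*
 * Minimal, hypothetical sketch of the kref-based get/put pattern this
 * patch standardizes on. Not the actual i915/gvt code.
 */
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct intel_vgpu_mm {
	struct kref ref;
	/* ... mm type, page-table state, vGPU back-pointer ... */
};

/* Called by kref_put() once the last reference is dropped. */
void _intel_vgpu_mm_release(struct kref *mm_ref)
{
	struct intel_vgpu_mm *mm = container_of(mm_ref, typeof(*mm), ref);

	/* tear down shadow page tables, unlink from lists, then free */
	kfree(mm);
}

static inline void intel_vgpu_mm_get(struct intel_vgpu_mm *mm)
{
	kref_get(&mm->ref);
}

static inline void intel_vgpu_mm_put(struct intel_vgpu_mm *mm)
{
	kref_put(&mm->ref, _intel_vgpu_mm_release);
}

/* Hypothetical creator: the initial reference belongs to the caller. */
static struct intel_vgpu_mm *example_create_mm(void)
{
	struct intel_vgpu_mm *mm = kzalloc(sizeof(*mm), GFP_KERNEL);

	if (mm)
		kref_init(&mm->ref);
	return mm;
}

With this pattern, every lookup that hands out an existing mm (for example intel_vgpu_find_ppgtt_mm() followed by intel_vgpu_mm_get()) must be balanced by an intel_vgpu_mm_put() when the user is done, so the object is freed only when the last reference goes away.
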
@@ -1613,13 +1613,13 @@ static struct intel_vgpu_mm *intel_vgpu_create_ggtt_mm(struct intel_vgpu *vgpu)
}
/**
- * intel_vgpu_destroy_mm - destroy a mm object
+ * _intel_vgpu_mm_release - destroy a mm object
* @mm_ref: a kref object
*
* This function is used to destroy a mm object for vGPU
*
*/
-void intel_vgpu_destroy_mm(struct kref *mm_ref)
+void _intel_vgpu_mm_release(struct kref *mm_ref)
{
struct intel_vgpu_mm *mm = container_of(mm_ref, typeof(*mm), ref);
@@ -2101,7 +2101,7 @@ static void intel_vgpu_destroy_all_ppgtt_mm(struct intel_vgpu *vgpu)
list_for_each_safe(pos, n, &vgpu->gtt.ppgtt_mm_list_head) {
mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.list);
-intel_vgpu_destroy_mm(&mm->ref);
+intel_vgpu_destroy_mm(mm);
}
if (GEM_WARN_ON(!list_empty(&vgpu->gtt.ppgtt_mm_list_head)))
@@ -2115,7 +2115,7 @@ static void intel_vgpu_destroy_all_ppgtt_mm(struct intel_vgpu *vgpu)
static void intel_vgpu_destroy_ggtt_mm(struct intel_vgpu *vgpu)
{
-intel_vgpu_destroy_mm(&vgpu->gtt.ggtt_mm->ref);
+intel_vgpu_destroy_mm(vgpu->gtt.ggtt_mm);
vgpu->gtt.ggtt_mm = NULL;
}
@@ -2240,7 +2240,7 @@ int intel_vgpu_g2v_create_ppgtt_mm(struct intel_vgpu *vgpu,
mm = intel_vgpu_find_ppgtt_mm(vgpu, pdps);
if (mm) {
-intel_gvt_mm_reference(mm);
+intel_vgpu_mm_get(mm);
} else {
mm = intel_vgpu_create_ppgtt_mm(vgpu, root_entry_type, pdps);
if (IS_ERR(mm)) {
@@ -2273,7 +2273,7 @@ int intel_vgpu_g2v_destroy_ppgtt_mm(struct intel_vgpu *vgpu,
gvt_vgpu_err("fail to find ppgtt instance.\n");
return -EINVAL;
}
-intel_gvt_mm_unreference(mm);
+intel_vgpu_mm_put(mm);
return 0;
}
@@ -191,7 +191,23 @@ extern int intel_vgpu_mm_set_entry(
struct intel_vgpu_mm *intel_vgpu_create_ppgtt_mm(struct intel_vgpu *vgpu,
intel_gvt_gtt_type_t root_entry_type, u64 pdps[]);
-extern void intel_vgpu_destroy_mm(struct kref *mm_ref);
+static inline void intel_vgpu_mm_get(struct intel_vgpu_mm *mm)
+{
+kref_get(&mm->ref);
+}
+void _intel_vgpu_mm_release(struct kref *mm_ref);
+static inline void intel_vgpu_mm_put(struct intel_vgpu_mm *mm)
+{
+kref_put(&mm->ref, _intel_vgpu_mm_release);
+}
+static inline void intel_vgpu_destroy_mm(struct intel_vgpu_mm *mm)
+{
+intel_vgpu_mm_put(mm);
+}
struct intel_vgpu_guest_page;
@@ -283,16 +299,6 @@ int intel_vgpu_sync_oos_pages(struct intel_vgpu *vgpu);
int intel_vgpu_flush_post_shadow(struct intel_vgpu *vgpu);
-static inline void intel_gvt_mm_reference(struct intel_vgpu_mm *mm)
-{
-kref_get(&mm->ref);
-}
-static inline void intel_gvt_mm_unreference(struct intel_vgpu_mm *mm)
-{
-kref_put(&mm->ref, intel_vgpu_destroy_mm);
-}
int intel_vgpu_pin_mm(struct intel_vgpu_mm *mm);
void intel_vgpu_unpin_mm(struct intel_vgpu_mm *mm);
@@ -1132,7 +1132,7 @@ void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload)
struct intel_vgpu_submission *s = &workload->vgpu->submission;
if (workload->shadow_mm)
-intel_gvt_mm_unreference(workload->shadow_mm);
+intel_vgpu_mm_put(workload->shadow_mm);
kmem_cache_free(s->workloads, workload);
}
@@ -1200,7 +1200,7 @@ static int prepare_mm(struct intel_vgpu_workload *workload)
mm = intel_vgpu_find_ppgtt_mm(workload->vgpu, pdps);
if (mm) {
-intel_gvt_mm_reference(mm);
+intel_vgpu_mm_get(mm);
} else {
mm = intel_vgpu_create_ppgtt_mm(workload->vgpu, root_entry_type,