Commit c10c1255 authored by Tina Zhang, committed by Zhenyu Wang

drm/i915/gvt: remove workload from intel_shadow_wa_ctx structure

intel_shadow_wa_ctx is a field embedded in intel_vgpu_workload, so
container_of() can be used to recover the owning workload from a wa_ctx
pointer. This patch removes the now-redundant workload back-pointer and
the extra dereferences that went through it.

v2: add "drm/i915/gvt" prefix. (Zhenyu)
Signed-off-by: Tina Zhang <tina.zhang@intel.com>
Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
parent 96cd733c
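
For readers unfamiliar with the pattern: container_of() computes the address
of an enclosing structure from the address of one of its members by
subtracting the member's offset. Below is a minimal standalone sketch in
userspace C, with a local copy of the macro (the kernel version adds type
checking) and hypothetical stand-in types, not the real gvt structures.

	#include <stddef.h>
	#include <stdio.h>

	/* Same idea as the kernel macro, minus the typeof() check. */
	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct wa_ctx {			/* stand-in for intel_shadow_wa_ctx */
		int indirect_ctx_size;
	};

	struct workload {		/* stand-in for intel_vgpu_workload */
		int ring_id;
		struct wa_ctx wa_ctx;	/* embedded member, as in the real struct */
	};

	int main(void)
	{
		struct workload w = { .ring_id = 2 };
		struct wa_ctx *ctx = &w.wa_ctx;

		/* Recover the enclosing workload from the embedded member,
		 * the same way the patched functions do. */
		struct workload *owner = container_of(ctx, struct workload, wa_ctx);

		printf("ring_id = %d\n", owner->ring_id);	/* prints: ring_id = 2 */
		return 0;
	}

Because wa_ctx is embedded in (not merely referenced by) the workload, the
back-pointer carried no information the embedding did not already provide,
which is why it can be deleted outright.
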
@@ -2609,6 +2609,9 @@ static int scan_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 	unsigned long gma_head, gma_tail, gma_bottom, ring_size, ring_tail;
 	struct parser_exec_state s;
 	int ret = 0;
+	struct intel_vgpu_workload *workload = container_of(wa_ctx,
+				struct intel_vgpu_workload,
+				wa_ctx);
 
 	/* ring base is page aligned */
 	if (WARN_ON(!IS_ALIGNED(wa_ctx->indirect_ctx.guest_gma, GTT_PAGE_SIZE)))
@@ -2623,14 +2626,14 @@ static int scan_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 
 	s.buf_type = RING_BUFFER_INSTRUCTION;
 	s.buf_addr_type = GTT_BUFFER;
-	s.vgpu = wa_ctx->workload->vgpu;
-	s.ring_id = wa_ctx->workload->ring_id;
+	s.vgpu = workload->vgpu;
+	s.ring_id = workload->ring_id;
 	s.ring_start = wa_ctx->indirect_ctx.guest_gma;
 	s.ring_size = ring_size;
 	s.ring_head = gma_head;
 	s.ring_tail = gma_tail;
 	s.rb_va = wa_ctx->indirect_ctx.shadow_va;
-	s.workload = wa_ctx->workload;
+	s.workload = workload;
 
 	ret = ip_gma_set(&s, gma_head);
 	if (ret)
@@ -2713,12 +2716,15 @@ static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 {
 	int ctx_size = wa_ctx->indirect_ctx.size;
 	unsigned long guest_gma = wa_ctx->indirect_ctx.guest_gma;
-	struct intel_vgpu *vgpu = wa_ctx->workload->vgpu;
+	struct intel_vgpu_workload *workload = container_of(wa_ctx,
+				struct intel_vgpu_workload,
+				wa_ctx);
+	struct intel_vgpu *vgpu = workload->vgpu;
 	struct drm_i915_gem_object *obj;
 	int ret = 0;
 	void *map;
 
-	obj = i915_gem_object_create(wa_ctx->workload->vgpu->gvt->dev_priv,
+	obj = i915_gem_object_create(workload->vgpu->gvt->dev_priv,
 			roundup(ctx_size + CACHELINE_BYTES,
 			PAGE_SIZE));
 	if (IS_ERR(obj))
@@ -2738,8 +2744,8 @@ static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 		goto unmap_src;
 	}
 
-	ret = copy_gma_to_hva(wa_ctx->workload->vgpu,
-				wa_ctx->workload->vgpu->gtt.ggtt_mm,
+	ret = copy_gma_to_hva(workload->vgpu,
+				workload->vgpu->gtt.ggtt_mm,
 				guest_gma, guest_gma + ctx_size,
 				map);
 	if (ret < 0) {
@@ -2777,7 +2783,10 @@ static int combine_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 {
 	int ret;
-	struct intel_vgpu *vgpu = wa_ctx->workload->vgpu;
+	struct intel_vgpu_workload *workload = container_of(wa_ctx,
+				struct intel_vgpu_workload,
+				wa_ctx);
+	struct intel_vgpu *vgpu = workload->vgpu;
 
 	if (wa_ctx->indirect_ctx.size == 0)
 		return 0;
...
@@ -394,9 +394,11 @@ static void prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
 
 static int update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 {
-	int ring_id = wa_ctx->workload->ring_id;
-	struct i915_gem_context *shadow_ctx =
-			wa_ctx->workload->vgpu->shadow_ctx;
+	struct intel_vgpu_workload *workload = container_of(wa_ctx,
+				struct intel_vgpu_workload,
+				wa_ctx);
+	int ring_id = workload->ring_id;
+	struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
 	struct drm_i915_gem_object *ctx_obj =
 			shadow_ctx->engine[ring_id].state->obj;
 	struct execlist_ring_context *shadow_ring_context;
@@ -680,7 +682,6 @@ static int submit_context(struct intel_vgpu *vgpu, int ring_id,
 				CACHELINE_BYTES;
 		workload->wa_ctx.per_ctx.guest_gma =
 			per_ctx & PER_CTX_ADDR_MASK;
-		workload->wa_ctx.workload = workload;
 
 		WARN_ON(workload->wa_ctx.indirect_ctx.size && !(per_ctx & 0x1));
 	}
...
@@ -67,7 +67,6 @@ struct shadow_per_ctx {
 };
 
 struct intel_shadow_wa_ctx {
-	struct intel_vgpu_workload *workload;
 	struct shadow_indirect_ctx indirect_ctx;
 	struct shadow_per_ctx per_ctx;
...