Commit 325eb94a authored by Zhi Wang, committed by Zhenyu Wang

drm/i915/gvt: Move ring scan buffers into intel_vgpu_submission

Move the ring scan buffers into intel_vgpu_submission, since they are
part of the vGPU submission state.
Signed-off-by: Zhi Wang <zhi.a.wang@intel.com>
parent 8cf80a2e
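
At a glance, the patch relocates two per-engine fields; the sketch below condenses the gvt.h hunk at the end of the diff, with unrelated fields elided:

	/* Before: the scan buffers sit directly in the vGPU. */
	struct intel_vgpu {
		...
		struct intel_vgpu_submission submission;
		/* 1/2K for each engine */
		void *ring_scan_buffer[I915_NUM_ENGINES];
		int ring_scan_buffer_size[I915_NUM_ENGINES];
		...
	};

	/* After: the submission state owns them. */
	struct intel_vgpu_submission {
		...
		/* 1/2K for each engine */
		void *ring_scan_buffer[I915_NUM_ENGINES];
		int ring_scan_buffer_size[I915_NUM_ENGINES];
	};

All users then reach the buffers through a local struct intel_vgpu_submission *s = &vgpu->submission pointer, as the hunks below show.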
@@ -2604,6 +2604,7 @@ static int scan_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
 {
 	struct intel_vgpu *vgpu = workload->vgpu;
+	struct intel_vgpu_submission *s = &vgpu->submission;
 	unsigned long gma_head, gma_tail, gma_top, guest_rb_size;
 	void *shadow_ring_buffer_va;
 	int ring_id = workload->ring_id;
@@ -2619,21 +2620,21 @@ static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
 	gma_tail = workload->rb_start + workload->rb_tail;
 	gma_top = workload->rb_start + guest_rb_size;

-	if (workload->rb_len > vgpu->ring_scan_buffer_size[ring_id]) {
+	if (workload->rb_len > s->ring_scan_buffer_size[ring_id]) {
 		void *p;

 		/* realloc the new ring buffer if needed */
-		p = krealloc(vgpu->ring_scan_buffer[ring_id], workload->rb_len,
+		p = krealloc(s->ring_scan_buffer[ring_id], workload->rb_len,
 				GFP_KERNEL);
 		if (!p) {
 			gvt_vgpu_err("fail to re-alloc ring scan buffer\n");
 			return -ENOMEM;
 		}
-		vgpu->ring_scan_buffer[ring_id] = p;
-		vgpu->ring_scan_buffer_size[ring_id] = workload->rb_len;
+		s->ring_scan_buffer[ring_id] = p;
+		s->ring_scan_buffer_size[ring_id] = workload->rb_len;
 	}

-	shadow_ring_buffer_va = vgpu->ring_scan_buffer[ring_id];
+	shadow_ring_buffer_va = s->ring_scan_buffer[ring_id];

 	/* get shadow ring buffer va */
 	workload->shadow_ring_buffer_va = shadow_ring_buffer_va;
...
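
Note the allocation pattern in the hunk above: the krealloc() result goes into a temporary p and is checked before the tracked pointer is overwritten, so a failed grow leaves the old buffer both reachable and correctly sized. A minimal userspace analog of the same idiom, using realloc(); grow_buffer and its parameters are illustrative names, not part of the patch:

	#include <stdlib.h>

	/*
	 * Grow a tracked buffer through a temporary so the original
	 * allocation is kept, not leaked, when realloc() fails.
	 */
	static int grow_buffer(void **buf, size_t *size, size_t needed)
	{
		void *p;

		if (needed <= *size)
			return 0;	/* current buffer already fits */

		p = realloc(*buf, needed);
		if (!p)
			return -1;	/* *buf and *size are still valid */

		*buf = p;
		*size = needed;
		return 0;
	}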
@@ -864,15 +864,18 @@ void intel_vgpu_clean_execlist(struct intel_vgpu *vgpu)
 	clean_workloads(vgpu, ALL_ENGINES);

 	for_each_engine(engine, vgpu->gvt->dev_priv, i) {
-		kfree(vgpu->ring_scan_buffer[i]);
-		vgpu->ring_scan_buffer[i] = NULL;
-		vgpu->ring_scan_buffer_size[i] = 0;
+		struct intel_vgpu_submission *s = &vgpu->submission;
+
+		kfree(s->ring_scan_buffer[i]);
+		s->ring_scan_buffer[i] = NULL;
+		s->ring_scan_buffer_size[i] = 0;
 	}
 }

 #define RESERVE_RING_BUFFER_SIZE		((1 * PAGE_SIZE)/8)
 int intel_vgpu_init_execlist(struct intel_vgpu *vgpu)
 {
+	struct intel_vgpu_submission *s = &vgpu->submission;
 	enum intel_engine_id i;
 	struct intel_engine_cs *engine;

@@ -881,21 +884,21 @@ int intel_vgpu_init_execlist(struct intel_vgpu *vgpu)
 	/* each ring has a shadow ring buffer until vgpu destroyed */
 	for_each_engine(engine, vgpu->gvt->dev_priv, i) {
-		vgpu->ring_scan_buffer[i] =
+		s->ring_scan_buffer[i] =
 			kmalloc(RESERVE_RING_BUFFER_SIZE, GFP_KERNEL);
-		if (!vgpu->ring_scan_buffer[i]) {
+		if (!s->ring_scan_buffer[i]) {
 			gvt_vgpu_err("fail to alloc ring scan buffer\n");
 			goto out;
 		}
-		vgpu->ring_scan_buffer_size[i] = RESERVE_RING_BUFFER_SIZE;
+		s->ring_scan_buffer_size[i] = RESERVE_RING_BUFFER_SIZE;
 	}
 	return 0;
 out:
 	for_each_engine(engine, vgpu->gvt->dev_priv, i) {
-		if (vgpu->ring_scan_buffer_size[i]) {
-			kfree(vgpu->ring_scan_buffer[i]);
-			vgpu->ring_scan_buffer[i] = NULL;
-			vgpu->ring_scan_buffer_size[i] = 0;
+		if (s->ring_scan_buffer_size[i]) {
+			kfree(s->ring_scan_buffer[i]);
+			s->ring_scan_buffer[i] = NULL;
+			s->ring_scan_buffer_size[i] = 0;
 		}
 	}
 	return -ENOMEM;
...
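
intel_vgpu_init_execlist() above preallocates RESERVE_RING_BUFFER_SIZE bytes per engine (PAGE_SIZE/8, i.e. 512 bytes with 4K pages, matching the "1/2K for each engine" comment in gvt.h) and, if any engine's allocation fails, unwinds every successful allocation via the out: label, using a nonzero ring_scan_buffer_size[i] as the "this slot was allocated" marker. A minimal userspace sketch of the same all-or-nothing pattern; NUM_ENGINES, RESERVE_SIZE, and init_scan_buffers are stand-in names, not from the patch:

	#include <stdlib.h>

	#define NUM_ENGINES	8	/* stand-in for I915_NUM_ENGINES */
	#define RESERVE_SIZE	512	/* stand-in for RESERVE_RING_BUFFER_SIZE */

	/*
	 * Allocate one buffer per engine; on any failure, free every
	 * slot that did succeed and report failure. Assumes buf[] and
	 * size[] arrive zero-initialized, as the kernel code's out:
	 * path implies for the vgpu structure.
	 */
	static int init_scan_buffers(void *buf[NUM_ENGINES], int size[NUM_ENGINES])
	{
		int i;

		for (i = 0; i < NUM_ENGINES; i++) {
			buf[i] = malloc(RESERVE_SIZE);
			if (!buf[i])
				goto out;
			size[i] = RESERVE_SIZE;
		}
		return 0;

	out:
		for (i = 0; i < NUM_ENGINES; i++) {
			if (size[i]) {
				free(buf[i]);
				buf[i] = NULL;
				size[i] = 0;
			}
		}
		return -1;
	}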
@@ -150,6 +150,9 @@ struct intel_vgpu_submission {
 	struct i915_gem_context *shadow_ctx;
 	DECLARE_BITMAP(shadow_ctx_desc_updated, I915_NUM_ENGINES);
 	DECLARE_BITMAP(tlb_handle_pending, I915_NUM_ENGINES);
+	/* 1/2K for each engine */
+	void *ring_scan_buffer[I915_NUM_ENGINES];
+	int ring_scan_buffer_size[I915_NUM_ENGINES];
 };

 struct intel_vgpu {
@@ -172,10 +175,6 @@ struct intel_vgpu {
 	struct intel_vgpu_opregion opregion;
 	struct intel_vgpu_display display;
 	struct intel_vgpu_submission submission;
-	/* 1/2K for each engine */
-	void *ring_scan_buffer[I915_NUM_ENGINES];
-	int ring_scan_buffer_size[I915_NUM_ENGINES];

 #if IS_ENABLED(CONFIG_DRM_I915_GVT_KVMGT)
 	struct {
...