Commit 1406a14b authored by Zhi Wang, committed by Zhenyu Wang

drm/i915/gvt: Introduce intel_vgpu_submission

Introduce intel_vgpu_submission to hold all the submission-related members
that previously lived directly in struct intel_vgpu.
Signed-off-by: Zhi Wang <zhi.a.wang@intel.com>
parent 9a9829e9
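The refactor is mechanical: the per-engine execlist state, workload queue heads, workload slab cache, running-workload counter, shadow context and its descriptor-update bitmap all move out of struct intel_vgpu into the new embedded struct intel_vgpu_submission, and every call site is rewritten to reach them through vgpu->submission, usually via a local pointer. Below is a minimal userspace sketch of the access-pattern change only; the type and field names are simplified stand-ins, not the real GVT-g definitions from the diff.

#include <stdio.h>

#define NUM_ENGINES 3			/* stand-in for I915_NUM_ENGINES */

/* Simplified stand-in for the new container introduced by this commit. */
struct vgpu_submission {
	int running_workload_num;	/* previously a direct field of the vgpu */
	int workload_q_len[NUM_ENGINES];
};

struct vgpu {
	int id;
	struct vgpu_submission submission;	/* submission state grouped here */
};

static void queue_workload(struct vgpu *vgpu, int ring_id)
{
	/* Call sites now take a local pointer to the submission state ... */
	struct vgpu_submission *s = &vgpu->submission;

	/* ... and dereference through it instead of through vgpu directly. */
	s->workload_q_len[ring_id]++;
	s->running_workload_num++;

	printf("vgpu %d ring %d: %d queued, %d running\n",
	       vgpu->id, ring_id,
	       s->workload_q_len[ring_id], s->running_workload_num);
}

int main(void)
{
	struct vgpu v = { .id = 1 };

	queue_workload(&v, 0);
	queue_workload(&v, 2);
	return 0;
}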
@@ -362,7 +362,7 @@ static void free_workload(struct intel_vgpu_workload *workload)
{
intel_vgpu_unpin_mm(workload->shadow_mm);
intel_gvt_mm_unreference(workload->shadow_mm);
kmem_cache_free(workload->vgpu->workloads, workload);
kmem_cache_free(workload->vgpu->submission.workloads, workload);
}
#define get_desc_from_elsp_dwords(ed, i) \
@@ -401,7 +401,8 @@ static int update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx)
struct intel_vgpu_workload,
wa_ctx);
int ring_id = workload->ring_id;
struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
struct intel_vgpu_submission *s = &workload->vgpu->submission;
struct i915_gem_context *shadow_ctx = s->shadow_ctx;
struct drm_i915_gem_object *ctx_obj =
shadow_ctx->engine[ring_id].state->obj;
struct execlist_ring_context *shadow_ring_context;
@@ -474,6 +475,7 @@ static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
static int prepare_execlist_workload(struct intel_vgpu_workload *workload)
{
struct intel_vgpu *vgpu = workload->vgpu;
struct intel_vgpu_submission *s = &vgpu->submission;
struct execlist_ctx_descriptor_format ctx[2];
int ring_id = workload->ring_id;
int ret;
@@ -514,7 +516,7 @@ static int prepare_execlist_workload(struct intel_vgpu_workload *workload)
ctx[0] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 0);
ctx[1] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 1);
ret = emulate_execlist_schedule_in(&vgpu->execlist[ring_id], ctx);
ret = emulate_execlist_schedule_in(&s->execlist[ring_id], ctx);
if (!ret)
goto out;
else
@@ -533,7 +535,8 @@ static int complete_execlist_workload(struct intel_vgpu_workload *workload)
{
struct intel_vgpu *vgpu = workload->vgpu;
int ring_id = workload->ring_id;
struct intel_vgpu_execlist *execlist = &vgpu->execlist[ring_id];
struct intel_vgpu_submission *s = &vgpu->submission;
struct intel_vgpu_execlist *execlist = &s->execlist[ring_id];
struct intel_vgpu_workload *next_workload;
struct list_head *next = workload_q_head(vgpu, ring_id)->next;
bool lite_restore = false;
@@ -652,6 +655,7 @@ static int submit_context(struct intel_vgpu *vgpu, int ring_id,
struct execlist_ctx_descriptor_format *desc,
bool emulate_schedule_in)
{
struct intel_vgpu_submission *s = &vgpu->submission;
struct list_head *q = workload_q_head(vgpu, ring_id);
struct intel_vgpu_workload *last_workload = get_last_workload(q);
struct intel_vgpu_workload *workload = NULL;
@@ -689,7 +693,7 @@ static int submit_context(struct intel_vgpu *vgpu, int ring_id,
gvt_dbg_el("ring id %d begin a new workload\n", ring_id);
workload = kmem_cache_zalloc(vgpu->workloads, GFP_KERNEL);
workload = kmem_cache_zalloc(s->workloads, GFP_KERNEL);
if (!workload)
return -ENOMEM;
@@ -738,7 +742,7 @@ static int submit_context(struct intel_vgpu *vgpu, int ring_id,
}
if (emulate_schedule_in)
workload->elsp_dwords = vgpu->execlist[ring_id].elsp_dwords;
workload->elsp_dwords = s->execlist[ring_id].elsp_dwords;
gvt_dbg_el("workload %p ring id %d head %x tail %x start %x ctl %x\n",
workload, ring_id, head, tail, start, ctl);
@@ -748,7 +752,7 @@ static int submit_context(struct intel_vgpu *vgpu, int ring_id,
ret = prepare_mm(workload);
if (ret) {
kmem_cache_free(vgpu->workloads, workload);
kmem_cache_free(s->workloads, workload);
return ret;
}
@@ -769,7 +773,8 @@ static int submit_context(struct intel_vgpu *vgpu, int ring_id,
int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id)
{
struct intel_vgpu_execlist *execlist = &vgpu->execlist[ring_id];
struct intel_vgpu_submission *s = &vgpu->submission;
struct intel_vgpu_execlist *execlist = &s->execlist[ring_id];
struct execlist_ctx_descriptor_format *desc[2];
int i, ret;
@@ -811,7 +816,8 @@ int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id)
static void init_vgpu_execlist(struct intel_vgpu *vgpu, int ring_id)
{
struct intel_vgpu_execlist *execlist = &vgpu->execlist[ring_id];
struct intel_vgpu_submission *s = &vgpu->submission;
struct intel_vgpu_execlist *execlist = &s->execlist[ring_id];
struct execlist_context_status_pointer_format ctx_status_ptr;
u32 ctx_status_ptr_reg;
@@ -833,6 +839,7 @@ static void init_vgpu_execlist(struct intel_vgpu *vgpu, int ring_id)
static void clean_workloads(struct intel_vgpu *vgpu, unsigned long engine_mask)
{
struct intel_vgpu_submission *s = &vgpu->submission;
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
struct intel_engine_cs *engine;
struct intel_vgpu_workload *pos, *n;
@@ -841,12 +848,11 @@ static void clean_workloads(struct intel_vgpu *vgpu, unsigned long engine_mask)
/* free the unsubmited workloads in the queues. */
for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
list_for_each_entry_safe(pos, n,
&vgpu->workload_q_head[engine->id], list) {
&s->workload_q_head[engine->id], list) {
list_del_init(&pos->list);
free_workload(pos);
}
clear_bit(engine->id, vgpu->shadow_ctx_desc_updated);
clear_bit(engine->id, s->shadow_ctx_desc_updated);
}
}
@@ -142,6 +142,15 @@ struct vgpu_sched_ctl {
int weight;
};
struct intel_vgpu_submission {
struct intel_vgpu_execlist execlist[I915_NUM_ENGINES];
struct list_head workload_q_head[I915_NUM_ENGINES];
struct kmem_cache *workloads;
atomic_t running_workload_num;
struct i915_gem_context *shadow_ctx;
DECLARE_BITMAP(shadow_ctx_desc_updated, I915_NUM_ENGINES);
};
struct intel_vgpu {
struct intel_gvt *gvt;
int id;
@@ -161,16 +170,12 @@ struct intel_vgpu {
struct intel_vgpu_gtt gtt;
struct intel_vgpu_opregion opregion;
struct intel_vgpu_display display;
struct intel_vgpu_execlist execlist[I915_NUM_ENGINES];
struct list_head workload_q_head[I915_NUM_ENGINES];
struct kmem_cache *workloads;
atomic_t running_workload_num;
struct intel_vgpu_submission submission;
/* 1/2K for each reserve ring buffer */
void *reserve_ring_buffer_va[I915_NUM_ENGINES];
int reserve_ring_buffer_size[I915_NUM_ENGINES];
DECLARE_BITMAP(tlb_handle_pending, I915_NUM_ENGINES);
struct i915_gem_context *shadow_ctx;
DECLARE_BITMAP(shadow_ctx_desc_updated, I915_NUM_ENGINES);
#if IS_ENABLED(CONFIG_DRM_I915_GVT_KVMGT)
struct {
@@ -1451,7 +1451,7 @@ static int elsp_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
if (WARN_ON(ring_id < 0 || ring_id > I915_NUM_ENGINES - 1))
return -EINVAL;
execlist = &vgpu->execlist[ring_id];
execlist = &vgpu->submission.execlist[ring_id];
execlist->elsp_dwords.data[3 - execlist->elsp_dwords.index] = data;
if (execlist->elsp_dwords.index == 3) {
@@ -1188,7 +1188,7 @@ hw_id_show(struct device *dev, struct device_attribute *attr,
struct intel_vgpu *vgpu = (struct intel_vgpu *)
mdev_get_drvdata(mdev);
return sprintf(buf, "%u\n",
vgpu->shadow_ctx->hw_id);
vgpu->submission.shadow_ctx->hw_id);
}
return sprintf(buf, "\n");
}
@@ -261,14 +261,15 @@ static void restore_mocs(struct intel_vgpu *vgpu, int ring_id)
static void switch_mmio_to_vgpu(struct intel_vgpu *vgpu, int ring_id)
{
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
struct render_mmio *mmio;
u32 v;
int i, array_size;
u32 *reg_state = vgpu->shadow_ctx->engine[ring_id].lrc_reg_state;
struct intel_vgpu_submission *s = &vgpu->submission;
u32 *reg_state = s->shadow_ctx->engine[ring_id].lrc_reg_state;
u32 ctx_ctrl = reg_state[CTX_CONTEXT_CONTROL_VAL];
u32 inhibit_mask =
_MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
i915_reg_t last_reg = _MMIO(0);
struct render_mmio *mmio;
u32 v;
int i, array_size;
if (IS_SKYLAKE(vgpu->gvt->dev_priv)
|| IS_KABYLAKE(vgpu->gvt->dev_priv)) {
@@ -57,7 +57,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
struct intel_vgpu *vgpu = workload->vgpu;
struct intel_gvt *gvt = vgpu->gvt;
int ring_id = workload->ring_id;
struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
struct i915_gem_context *shadow_ctx = vgpu->submission.shadow_ctx;
struct drm_i915_gem_object *ctx_obj =
shadow_ctx->engine[ring_id].state->obj;
struct execlist_ring_context *shadow_ring_context;
@@ -249,12 +249,13 @@ void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
*/
int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
{
struct intel_vgpu *vgpu = workload->vgpu;
struct intel_vgpu_submission *s = &vgpu->submission;
struct i915_gem_context *shadow_ctx = s->shadow_ctx;
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
int ring_id = workload->ring_id;
struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
struct intel_engine_cs *engine = dev_priv->engine[ring_id];
struct drm_i915_gem_request *rq;
struct intel_vgpu *vgpu = workload->vgpu;
struct intel_ring *ring;
int ret;
@@ -267,7 +268,7 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
shadow_ctx->desc_template |= workload->ctx_desc.addressing_mode <<
GEN8_CTX_ADDRESSING_MODE_SHIFT;
if (!test_and_set_bit(ring_id, vgpu->shadow_ctx_desc_updated))
if (!test_and_set_bit(ring_id, s->shadow_ctx_desc_updated))
shadow_context_descriptor_update(shadow_ctx,
dev_priv->engine[ring_id]);
@@ -326,9 +327,11 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
static int dispatch_workload(struct intel_vgpu_workload *workload)
{
struct intel_vgpu *vgpu = workload->vgpu;
struct intel_vgpu_submission *s = &vgpu->submission;
struct i915_gem_context *shadow_ctx = s->shadow_ctx;
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
int ring_id = workload->ring_id;
struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
struct intel_engine_cs *engine = dev_priv->engine[ring_id];
int ret = 0;
@@ -414,7 +417,7 @@ static struct intel_vgpu_workload *pick_next_workload(
gvt_dbg_sched("ring id %d pick new workload %p\n", ring_id, workload);
atomic_inc(&workload->vgpu->running_workload_num);
atomic_inc(&workload->vgpu->submission.running_workload_num);
out:
mutex_unlock(&gvt->lock);
return workload;
@@ -424,8 +427,9 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
{
struct intel_vgpu *vgpu = workload->vgpu;
struct intel_gvt *gvt = vgpu->gvt;
struct intel_vgpu_submission *s = &vgpu->submission;
struct i915_gem_context *shadow_ctx = s->shadow_ctx;
int ring_id = workload->ring_id;
struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
struct drm_i915_gem_object *ctx_obj =
shadow_ctx->engine[ring_id].state->obj;
struct execlist_ring_context *shadow_ring_context;
@@ -491,15 +495,14 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
{
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
struct intel_vgpu_workload *workload;
struct intel_vgpu *vgpu;
struct intel_vgpu_workload *workload =
scheduler->current_workload[ring_id];
struct intel_vgpu *vgpu = workload->vgpu;
struct intel_vgpu_submission *s = &vgpu->submission;
int event;
mutex_lock(&gvt->lock);
workload = scheduler->current_workload[ring_id];
vgpu = workload->vgpu;
/* For the workload w/ request, needs to wait for the context
* switch to make sure request is completed.
* For the workload w/o request, directly complete the workload.
@@ -536,7 +539,7 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
}
mutex_lock(&dev_priv->drm.struct_mutex);
/* unpin shadow ctx as the shadow_ctx update is done */
engine->context_unpin(engine, workload->vgpu->shadow_ctx);
engine->context_unpin(engine, s->shadow_ctx);
mutex_unlock(&dev_priv->drm.struct_mutex);
}
@@ -548,7 +551,7 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
list_del_init(&workload->list);
workload->complete(workload);
atomic_dec(&vgpu->running_workload_num);
atomic_dec(&s->running_workload_num);
wake_up(&scheduler->workload_complete_wq);
if (gvt->scheduler.need_reschedule)
@@ -637,14 +640,15 @@ static int workload_thread(void *priv)
void intel_gvt_wait_vgpu_idle(struct intel_vgpu *vgpu)
{
struct intel_vgpu_submission *s = &vgpu->submission;
struct intel_gvt *gvt = vgpu->gvt;
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
if (atomic_read(&vgpu->running_workload_num)) {
if (atomic_read(&s->running_workload_num)) {
gvt_dbg_sched("wait vgpu idle\n");
wait_event(scheduler->workload_complete_wq,
!atomic_read(&vgpu->running_workload_num));
!atomic_read(&s->running_workload_num));
}
}
@@ -718,8 +722,10 @@ int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt)
*/
void intel_vgpu_clean_submission(struct intel_vgpu *vgpu)
{
i915_gem_context_put(vgpu->shadow_ctx);
kmem_cache_destroy(vgpu->workloads);
struct intel_vgpu_submission *s = &vgpu->submission;
i915_gem_context_put(s->shadow_ctx);
kmem_cache_destroy(s->workloads);
}
/**
@@ -734,35 +740,36 @@ void intel_vgpu_clean_submission(struct intel_vgpu *vgpu)
*/
int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
{
struct intel_vgpu_submission *s = &vgpu->submission;
enum intel_engine_id i;
struct intel_engine_cs *engine;
int ret;
vgpu->shadow_ctx = i915_gem_context_create_gvt(
s->shadow_ctx = i915_gem_context_create_gvt(
&vgpu->gvt->dev_priv->drm);
if (IS_ERR(vgpu->shadow_ctx))
return PTR_ERR(vgpu->shadow_ctx);
if (IS_ERR(s->shadow_ctx))
return PTR_ERR(s->shadow_ctx);
bitmap_zero(vgpu->shadow_ctx_desc_updated, I915_NUM_ENGINES);
bitmap_zero(s->shadow_ctx_desc_updated, I915_NUM_ENGINES);
vgpu->workloads = kmem_cache_create("gvt-g_vgpu_workload",
s->workloads = kmem_cache_create("gvt-g_vgpu_workload",
sizeof(struct intel_vgpu_workload), 0,
SLAB_HWCACHE_ALIGN,
NULL);
if (!vgpu->workloads) {
if (!s->workloads) {
ret = -ENOMEM;
goto out_shadow_ctx;
}
for_each_engine(engine, vgpu->gvt->dev_priv, i)
INIT_LIST_HEAD(&vgpu->workload_q_head[i]);
INIT_LIST_HEAD(&s->workload_q_head[i]);
atomic_set(&vgpu->running_workload_num, 0);
atomic_set(&s->running_workload_num, 0);
return 0;
out_shadow_ctx:
i915_gem_context_put(vgpu->shadow_ctx);
i915_gem_context_put(s->shadow_ctx);
return ret;
}
@@ -122,7 +122,7 @@ struct intel_shadow_bb_entry {
};
#define workload_q_head(vgpu, ring_id) \
(&(vgpu->workload_q_head[ring_id]))
(&(vgpu->submission.workload_q_head[ring_id]))
#define queue_workload(workload) do { \
list_add_tail(&workload->list, \
@@ -226,7 +226,7 @@ void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu)
vgpu->active = false;
if (atomic_read(&vgpu->running_workload_num)) {
if (atomic_read(&vgpu->submission.running_workload_num)) {
mutex_unlock(&gvt->lock);
intel_gvt_wait_vgpu_idle(vgpu);
mutex_lock(&gvt->lock);
@@ -293,7 +293,7 @@ struct intel_vgpu *intel_gvt_create_idle_vgpu(struct intel_gvt *gvt)
vgpu->gvt = gvt;
for (i = 0; i < I915_NUM_ENGINES; i++)
INIT_LIST_HEAD(&vgpu->workload_q_head[i]);
INIT_LIST_HEAD(&vgpu->submission.workload_q_head[i]);
ret = intel_vgpu_init_sched_policy(vgpu);
if (ret)