Commit 75e675f8 authored by Rodrigo Vivi

Merge tag 'gvt-next-2020-03-10' of https://github.com/intel/gvt-linux into drm-intel-next-queued

gvt-next-2020-03-10

- Fix CFL dmabuf display after vfio edid enabling (Tina)
- Clean up scan non-priv batch debugfs entry (Chris)
- Use intel engines initialized in gvt, cleanup previous ring id (Chris)
- Use intel_gt instead (Chris)
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
From: Zhenyu Wang <zhenyuw@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200310081928.GG28483@zhen-hp.sh.intel.com
parents 765e7cd9 a61ac1e7
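
The changes below repeatedly apply one refactoring pattern: GVT code stops caching a drm_i915_private pointer (gvt->dev_priv) and instead reaches the hardware through gvt->gt, gvt->gt->i915 and gvt->gt->uncore; raw ring ids give way to struct intel_engine_cs pointers (so per-engine bits are tested with engine->mask rather than BIT(ring_id)); and runtime-PM references are tracked with an intel_wakeref_t. A rough, illustrative sketch of the new style (not part of the patch; the helper name is made up, but every call it makes appears in the diff):

/* Hypothetical helper, for illustration only. */
static void example_poke_fence(struct intel_vgpu *vgpu)
{
    struct intel_gvt *gvt = vgpu->gvt;
    struct intel_uncore *uncore = gvt->gt->uncore;   /* was gvt->dev_priv */
    intel_wakeref_t wakeref;

    /* Wakeref-tracked get/put replaces the _unchecked variants. */
    wakeref = intel_runtime_pm_get(uncore->rpm);

    /* MMIO goes through the uncore helpers instead of I915_WRITE()/POSTING_READ(). */
    intel_uncore_write(uncore, FENCE_REG_GEN6_LO(0), 0);
    intel_uncore_posting_read(uncore, FENCE_REG_GEN6_LO(0));

    intel_runtime_pm_put(uncore->rpm, wakeref);
}
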
@@ -41,7 +41,7 @@
static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm)
{
struct intel_gvt *gvt = vgpu->gvt;
- struct drm_i915_private *dev_priv = gvt->dev_priv;
+ struct intel_gt *gt = gvt->gt;
unsigned int flags;
u64 start, end, size;
struct drm_mm_node *node;
@@ -61,14 +61,14 @@ static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm)
flags = PIN_MAPPABLE;
}
- mutex_lock(&dev_priv->ggtt.vm.mutex);
+ mutex_lock(&gt->ggtt->vm.mutex);
- mmio_hw_access_pre(dev_priv);
+ mmio_hw_access_pre(gt);
- ret = i915_gem_gtt_insert(&dev_priv->ggtt.vm, node,
+ ret = i915_gem_gtt_insert(&gt->ggtt->vm, node,
size, I915_GTT_PAGE_SIZE,
I915_COLOR_UNEVICTABLE,
start, end, flags);
- mmio_hw_access_post(dev_priv);
+ mmio_hw_access_post(gt);
- mutex_unlock(&dev_priv->ggtt.vm.mutex);
+ mutex_unlock(&gt->ggtt->vm.mutex);
if (ret)
gvt_err("fail to alloc %s gm space from host\n",
high_gm ? "high" : "low");
@@ -79,7 +79,7 @@ static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm)
static int alloc_vgpu_gm(struct intel_vgpu *vgpu)
{
struct intel_gvt *gvt = vgpu->gvt;
- struct drm_i915_private *dev_priv = gvt->dev_priv;
+ struct intel_gt *gt = gvt->gt;
int ret;
ret = alloc_gm(vgpu, false);
@@ -98,20 +98,21 @@ static int alloc_vgpu_gm(struct intel_vgpu *vgpu)
return 0;
out_free_aperture:
- mutex_lock(&dev_priv->ggtt.vm.mutex);
+ mutex_lock(&gt->ggtt->vm.mutex);
drm_mm_remove_node(&vgpu->gm.low_gm_node);
- mutex_unlock(&dev_priv->ggtt.vm.mutex);
+ mutex_unlock(&gt->ggtt->vm.mutex);
return ret;
}
static void free_vgpu_gm(struct intel_vgpu *vgpu)
{
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct intel_gvt *gvt = vgpu->gvt;
+ struct intel_gt *gt = gvt->gt;
- mutex_lock(&dev_priv->ggtt.vm.mutex);
+ mutex_lock(&gt->ggtt->vm.mutex);
drm_mm_remove_node(&vgpu->gm.low_gm_node);
drm_mm_remove_node(&vgpu->gm.high_gm_node);
- mutex_unlock(&dev_priv->ggtt.vm.mutex);
+ mutex_unlock(&gt->ggtt->vm.mutex);
}
/**
@@ -128,28 +129,29 @@ void intel_vgpu_write_fence(struct intel_vgpu *vgpu,
u32 fence, u64 value)
{
struct intel_gvt *gvt = vgpu->gvt;
- struct drm_i915_private *dev_priv = gvt->dev_priv;
+ struct drm_i915_private *i915 = gvt->gt->i915;
+ struct intel_uncore *uncore = gvt->gt->uncore;
struct i915_fence_reg *reg;
i915_reg_t fence_reg_lo, fence_reg_hi;
- assert_rpm_wakelock_held(&dev_priv->runtime_pm);
+ assert_rpm_wakelock_held(uncore->rpm);
- if (drm_WARN_ON(&dev_priv->drm, fence >= vgpu_fence_sz(vgpu)))
+ if (drm_WARN_ON(&i915->drm, fence >= vgpu_fence_sz(vgpu)))
return;
reg = vgpu->fence.regs[fence];
- if (drm_WARN_ON(&dev_priv->drm, !reg))
+ if (drm_WARN_ON(&i915->drm, !reg))
return;
fence_reg_lo = FENCE_REG_GEN6_LO(reg->id);
fence_reg_hi = FENCE_REG_GEN6_HI(reg->id);
- I915_WRITE(fence_reg_lo, 0);
+ intel_uncore_write(uncore, fence_reg_lo, 0);
- POSTING_READ(fence_reg_lo);
+ intel_uncore_posting_read(uncore, fence_reg_lo);
- I915_WRITE(fence_reg_hi, upper_32_bits(value));
+ intel_uncore_write(uncore, fence_reg_hi, upper_32_bits(value));
- I915_WRITE(fence_reg_lo, lower_32_bits(value));
+ intel_uncore_write(uncore, fence_reg_lo, lower_32_bits(value));
- POSTING_READ(fence_reg_lo);
+ intel_uncore_posting_read(uncore, fence_reg_lo);
}
static void _clear_vgpu_fence(struct intel_vgpu *vgpu)
@@ -163,42 +165,43 @@ static void _clear_vgpu_fence(struct intel_vgpu *vgpu)
static void free_vgpu_fence(struct intel_vgpu *vgpu)
{
struct intel_gvt *gvt = vgpu->gvt;
- struct drm_i915_private *dev_priv = gvt->dev_priv;
+ struct intel_uncore *uncore = gvt->gt->uncore;
struct i915_fence_reg *reg;
+ intel_wakeref_t wakeref;
u32 i;
- if (drm_WARN_ON(&dev_priv->drm, !vgpu_fence_sz(vgpu)))
+ if (drm_WARN_ON(&gvt->gt->i915->drm, !vgpu_fence_sz(vgpu)))
return;
- intel_runtime_pm_get(&dev_priv->runtime_pm);
+ wakeref = intel_runtime_pm_get(uncore->rpm);
- mutex_lock(&dev_priv->ggtt.vm.mutex);
+ mutex_lock(&gvt->gt->ggtt->vm.mutex);
_clear_vgpu_fence(vgpu);
for (i = 0; i < vgpu_fence_sz(vgpu); i++) {
reg = vgpu->fence.regs[i];
i915_unreserve_fence(reg);
vgpu->fence.regs[i] = NULL;
}
- mutex_unlock(&dev_priv->ggtt.vm.mutex);
+ mutex_unlock(&gvt->gt->ggtt->vm.mutex);
- intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
+ intel_runtime_pm_put(uncore->rpm, wakeref);
}
static int alloc_vgpu_fence(struct intel_vgpu *vgpu)
{
struct intel_gvt *gvt = vgpu->gvt;
- struct drm_i915_private *dev_priv = gvt->dev_priv;
+ struct intel_uncore *uncore = gvt->gt->uncore;
- struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
struct i915_fence_reg *reg;
+ intel_wakeref_t wakeref;
int i;
- intel_runtime_pm_get(rpm);
+ wakeref = intel_runtime_pm_get(uncore->rpm);
/* Request fences from host */
- mutex_lock(&dev_priv->ggtt.vm.mutex);
+ mutex_lock(&gvt->gt->ggtt->vm.mutex);
for (i = 0; i < vgpu_fence_sz(vgpu); i++) {
- reg = i915_reserve_fence(&dev_priv->ggtt);
+ reg = i915_reserve_fence(gvt->gt->ggtt);
if (IS_ERR(reg))
goto out_free_fence;
@@ -207,9 +210,10 @@ static int alloc_vgpu_fence(struct intel_vgpu *vgpu)
_clear_vgpu_fence(vgpu);
- mutex_unlock(&dev_priv->ggtt.vm.mutex);
+ mutex_unlock(&gvt->gt->ggtt->vm.mutex);
- intel_runtime_pm_put_unchecked(rpm);
+ intel_runtime_pm_put(uncore->rpm, wakeref);
return 0;
out_free_fence:
gvt_vgpu_err("Failed to alloc fences\n");
/* Return fences to host, if fail */
@@ -220,8 +224,8 @@ static int alloc_vgpu_fence(struct intel_vgpu *vgpu)
i915_unreserve_fence(reg);
vgpu->fence.regs[i] = NULL;
}
- mutex_unlock(&dev_priv->ggtt.vm.mutex);
+ mutex_unlock(&gvt->gt->ggtt->vm.mutex);
- intel_runtime_pm_put_unchecked(rpm);
+ intel_runtime_pm_put_unchecked(uncore->rpm);
return -ENOSPC;
}
@@ -315,11 +319,11 @@ void intel_vgpu_free_resource(struct intel_vgpu *vgpu)
 */
void intel_vgpu_reset_resource(struct intel_vgpu *vgpu)
{
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct intel_gvt *gvt = vgpu->gvt;
+ intel_wakeref_t wakeref;
- intel_runtime_pm_get(&dev_priv->runtime_pm);
+ with_intel_runtime_pm(gvt->gt->uncore->rpm, wakeref)
_clear_vgpu_fence(vgpu);
- intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
}
/**
...
@@ -106,7 +106,7 @@ static void vgpu_pci_cfg_mem_write(struct intel_vgpu *vgpu, unsigned int off,
int intel_vgpu_emulate_cfg_read(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
- struct drm_i915_private *i915 = vgpu->gvt->dev_priv;
+ struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
if (drm_WARN_ON(&i915->drm, bytes > 4))
return -EINVAL;
@@ -300,7 +300,7 @@ static int emulate_pci_bar_write(struct intel_vgpu *vgpu, unsigned int offset,
int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
- struct drm_i915_private *i915 = vgpu->gvt->dev_priv;
+ struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
int ret;
if (drm_WARN_ON(&i915->drm, bytes > 4))
@@ -396,9 +396,9 @@ void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu,
memset(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_OPREGION, 0, 4);
vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].size =
- pci_resource_len(gvt->dev_priv->drm.pdev, 0);
+ pci_resource_len(gvt->gt->i915->drm.pdev, 0);
vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].size =
- pci_resource_len(gvt->dev_priv->drm.pdev, 2);
+ pci_resource_len(gvt->gt->i915->drm.pdev, 2);
memset(vgpu_cfg_space(vgpu) + PCI_ROM_ADDRESS, 0, 4);
}
...
@@ -462,7 +462,7 @@ enum {
struct parser_exec_state {
struct intel_vgpu *vgpu;
- int ring_id;
+ const struct intel_engine_cs *engine;
int buf_type;
@@ -635,39 +635,42 @@ static const struct decode_info *ring_decode_info[I915_NUM_ENGINES][8] = {
},
};
- static inline u32 get_opcode(u32 cmd, int ring_id)
+ static inline u32 get_opcode(u32 cmd, const struct intel_engine_cs *engine)
{
const struct decode_info *d_info;
- d_info = ring_decode_info[ring_id][CMD_TYPE(cmd)];
+ d_info = ring_decode_info[engine->id][CMD_TYPE(cmd)];
if (d_info == NULL)
return INVALID_OP;
return cmd >> (32 - d_info->op_len);
}
- static inline const struct cmd_info *find_cmd_entry(struct intel_gvt *gvt,
- unsigned int opcode, int ring_id)
+ static inline const struct cmd_info *
+ find_cmd_entry(struct intel_gvt *gvt, unsigned int opcode,
+ const struct intel_engine_cs *engine)
{
struct cmd_entry *e;
hash_for_each_possible(gvt->cmd_table, e, hlist, opcode) {
- if (opcode == e->info->opcode && e->info->rings & BIT(ring_id))
+ if (opcode == e->info->opcode &&
+ e->info->rings & engine->mask)
return e->info;
}
return NULL;
}
- static inline const struct cmd_info *get_cmd_info(struct intel_gvt *gvt,
- u32 cmd, int ring_id)
+ static inline const struct cmd_info *
+ get_cmd_info(struct intel_gvt *gvt, u32 cmd,
+ const struct intel_engine_cs *engine)
{
u32 opcode;
- opcode = get_opcode(cmd, ring_id);
+ opcode = get_opcode(cmd, engine);
if (opcode == INVALID_OP)
return NULL;
- return find_cmd_entry(gvt, opcode, ring_id);
+ return find_cmd_entry(gvt, opcode, engine);
}
static inline u32 sub_op_val(u32 cmd, u32 hi, u32 low)
@@ -675,12 +678,12 @@ static inline u32 sub_op_val(u32 cmd, u32 hi, u32 low)
return (cmd >> low) & ((1U << (hi - low + 1)) - 1);
}
- static inline void print_opcode(u32 cmd, int ring_id)
+ static inline void print_opcode(u32 cmd, const struct intel_engine_cs *engine)
{
const struct decode_info *d_info;
int i;
- d_info = ring_decode_info[ring_id][CMD_TYPE(cmd)];
+ d_info = ring_decode_info[engine->id][CMD_TYPE(cmd)];
if (d_info == NULL)
return;
@@ -709,10 +712,11 @@ static void parser_exec_state_dump(struct parser_exec_state *s)
int cnt = 0;
int i;
- gvt_dbg_cmd(" vgpu%d RING%d: ring_start(%08lx) ring_end(%08lx)"
- " ring_head(%08lx) ring_tail(%08lx)\n", s->vgpu->id,
- s->ring_id, s->ring_start, s->ring_start + s->ring_size,
- s->ring_head, s->ring_tail);
+ gvt_dbg_cmd(" vgpu%d RING%s: ring_start(%08lx) ring_end(%08lx)"
+ " ring_head(%08lx) ring_tail(%08lx)\n",
+ s->vgpu->id, s->engine->name,
+ s->ring_start, s->ring_start + s->ring_size,
+ s->ring_head, s->ring_tail);
gvt_dbg_cmd(" %s %s ip_gma(%08lx) ",
s->buf_type == RING_BUFFER_INSTRUCTION ?
@@ -729,7 +733,7 @@ static void parser_exec_state_dump(struct parser_exec_state *s)
s->ip_va, cmd_val(s, 0), cmd_val(s, 1),
cmd_val(s, 2), cmd_val(s, 3));
- print_opcode(cmd_val(s, 0), s->ring_id);
+ print_opcode(cmd_val(s, 0), s->engine);
s->ip_va = (u32 *)((((u64)s->ip_va) >> 12) << 12);
@@ -840,7 +844,6 @@ static int force_nonpriv_reg_handler(struct parser_exec_state *s,
unsigned int data;
u32 ring_base;
u32 nopid;
- struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
if (!strcmp(cmd, "lri"))
data = cmd_val(s, index + 1);
@@ -850,7 +853,7 @@ static int force_nonpriv_reg_handler(struct parser_exec_state *s,
return -EINVAL;
}
- ring_base = dev_priv->engine[s->ring_id]->mmio_base;
+ ring_base = s->engine->mmio_base;
nopid = i915_mmio_reg_offset(RING_NOPID(ring_base));
if (!intel_gvt_in_force_nonpriv_whitelist(gvt, data) &&
@@ -926,9 +929,9 @@ static int cmd_reg_handler(struct parser_exec_state *s,
 * update reg values in it into vregs, so LRIs in workload with
 * inhibit context will restore with correct values
 */
- if (IS_GEN(gvt->dev_priv, 9) &&
+ if (IS_GEN(s->engine->i915, 9) &&
intel_gvt_mmio_is_in_ctx(gvt, offset) &&
!strncmp(cmd, "lri", 3)) {
intel_gvt_hypervisor_read_gpa(s->vgpu,
s->workload->ring_context_gpa + 12, &ctx_sr_ctl, 4);
/* check inhibit context */
@@ -964,7 +967,6 @@ static int cmd_handler_lri(struct parser_exec_state *s)
{
int i, ret = 0;
int cmd_len = cmd_length(s);
- struct intel_gvt *gvt = s->vgpu->gvt;
u32 valid_len = CMD_LEN(1);
/*
@@ -979,8 +981,8 @@ static int cmd_handler_lri(struct parser_exec_state *s)
}
for (i = 1; i < cmd_len; i += 2) {
- if (IS_BROADWELL(gvt->dev_priv) && s->ring_id != RCS0) {
+ if (IS_BROADWELL(s->engine->i915) && s->engine->id != RCS0) {
- if (s->ring_id == BCS0 &&
+ if (s->engine->id == BCS0 &&
cmd_reg(s, i) == i915_mmio_reg_offset(DERRMR))
ret |= 0;
else
@@ -1001,9 +1003,9 @@ static int cmd_handler_lrr(struct parser_exec_state *s)
int cmd_len = cmd_length(s);
for (i = 1; i < cmd_len; i += 2) {
- if (IS_BROADWELL(s->vgpu->gvt->dev_priv))
+ if (IS_BROADWELL(s->engine->i915))
ret |= ((cmd_reg_inhibit(s, i) ||
(cmd_reg_inhibit(s, i + 1)))) ?
-EBADRQC : 0;
if (ret)
break;
@@ -1029,7 +1031,7 @@ static int cmd_handler_lrm(struct parser_exec_state *s)
int cmd_len = cmd_length(s);
for (i = 1; i < cmd_len;) {
- if (IS_BROADWELL(gvt->dev_priv))
+ if (IS_BROADWELL(s->engine->i915))
ret |= (cmd_reg_inhibit(s, i)) ? -EBADRQC : 0;
if (ret)
break;
@@ -1141,7 +1143,7 @@ static int cmd_handler_pipe_control(struct parser_exec_state *s)
if (ret)
return ret;
if (index_mode) {
- hws_pga = s->vgpu->hws_pga[s->ring_id];
+ hws_pga = s->vgpu->hws_pga[s->engine->id];
gma = hws_pga + gma;
patch_value(s, cmd_ptr(s, 2), gma);
val = cmd_val(s, 1) & (~(1 << 21));
@@ -1155,15 +1157,15 @@ static int cmd_handler_pipe_control(struct parser_exec_state *s)
return ret;
if (cmd_val(s, 1) & PIPE_CONTROL_NOTIFY)
- set_bit(cmd_interrupt_events[s->ring_id].pipe_control_notify,
+ set_bit(cmd_interrupt_events[s->engine->id].pipe_control_notify,
s->workload->pending_events);
return 0;
}
static int cmd_handler_mi_user_interrupt(struct parser_exec_state *s)
{
- set_bit(cmd_interrupt_events[s->ring_id].mi_user_interrupt,
+ set_bit(cmd_interrupt_events[s->engine->id].mi_user_interrupt,
s->workload->pending_events);
patch_value(s, cmd_ptr(s, 0), MI_NOOP);
return 0;
}
@@ -1213,7 +1215,7 @@ struct plane_code_mapping {
static int gen8_decode_mi_display_flip(struct parser_exec_state *s,
struct mi_display_flip_command_info *info)
{
- struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
+ struct drm_i915_private *dev_priv = s->engine->i915;
struct plane_code_mapping gen8_plane_code[] = {
[0] = {PIPE_A, PLANE_A, PRIMARY_A_FLIP_DONE},
[1] = {PIPE_B, PLANE_A, PRIMARY_B_FLIP_DONE},
@@ -1259,7 +1261,7 @@ static int gen8_decode_mi_display_flip(struct parser_exec_state *s,
static int skl_decode_mi_display_flip(struct parser_exec_state *s,
struct mi_display_flip_command_info *info)
{
- struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
+ struct drm_i915_private *dev_priv = s->engine->i915;
struct intel_vgpu *vgpu = s->vgpu;
u32 dword0 = cmd_val(s, 0);
u32 dword1 = cmd_val(s, 1);
@@ -1318,13 +1320,12 @@ static int skl_decode_mi_display_flip(struct parser_exec_state *s,
static int gen8_check_mi_display_flip(struct parser_exec_state *s,
struct mi_display_flip_command_info *info)
{
- struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
u32 stride, tile;
if (!info->async_flip)
return 0;
- if (INTEL_GEN(dev_priv) >= 9) {
+ if (INTEL_GEN(s->engine->i915) >= 9) {
stride = vgpu_vreg_t(s->vgpu, info->stride_reg) & GENMASK(9, 0);
tile = (vgpu_vreg_t(s->vgpu, info->ctrl_reg) &
GENMASK(12, 10)) >> 10;
@@ -1347,7 +1348,7 @@ static int gen8_update_plane_mmio_from_mi_display_flip(
struct parser_exec_state *s,
struct mi_display_flip_command_info *info)
{
- struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
+ struct drm_i915_private *dev_priv = s->engine->i915;
struct intel_vgpu *vgpu = s->vgpu;
set_mask_bits(&vgpu_vreg_t(vgpu, info->surf_reg), GENMASK(31, 12),
@@ -1378,11 +1379,9 @@ static int gen8_update_plane_mmio_from_mi_display_flip(
static int decode_mi_display_flip(struct parser_exec_state *s,
struct mi_display_flip_command_info *info)
{
- struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
- if (IS_BROADWELL(dev_priv))
+ if (IS_BROADWELL(s->engine->i915))
return gen8_decode_mi_display_flip(s, info);
- if (INTEL_GEN(dev_priv) >= 9)
+ if (INTEL_GEN(s->engine->i915) >= 9)
return skl_decode_mi_display_flip(s, info);
return -ENODEV;
@@ -1667,7 +1666,7 @@ static int cmd_handler_mi_flush_dw(struct parser_exec_state *s)
if (ret)
return ret;
if (index_mode) {
- hws_pga = s->vgpu->hws_pga[s->ring_id];
+ hws_pga = s->vgpu->hws_pga[s->engine->id];
gma = hws_pga + gma;
patch_value(s, cmd_ptr(s, 1), gma);
val = cmd_val(s, 0) & (~(1 << 21));
@@ -1676,8 +1675,8 @@ static int cmd_handler_mi_flush_dw(struct parser_exec_state *s)
}
/* Check notify bit */
if ((cmd_val(s, 0) & (1 << 8)))
- set_bit(cmd_interrupt_events[s->ring_id].mi_flush_dw,
+ set_bit(cmd_interrupt_events[s->engine->id].mi_flush_dw,
s->workload->pending_events);
return ret;
}
@@ -1725,12 +1724,18 @@ static int copy_gma_to_hva(struct intel_vgpu *vgpu, struct intel_vgpu_mm *mm,
static int batch_buffer_needs_scan(struct parser_exec_state *s)
{
/* Decide privilege based on address space */
- if (cmd_val(s, 0) & (1 << 8) &&
- !(s->vgpu->scan_nonprivbb & (1 << s->ring_id)))
+ if (cmd_val(s, 0) & BIT(8) &&
+ !(s->vgpu->scan_nonprivbb & s->engine->mask))
return 0;
return 1;
}
+ static const char *repr_addr_type(unsigned int type)
+ {
+ return type == PPGTT_BUFFER ? "ppgtt" : "ggtt";
+ }
static int find_bb_size(struct parser_exec_state *s,
unsigned long *bb_size,
unsigned long *bb_end_cmd_offset)
@@ -1753,24 +1758,24 @@ static int find_bb_size(struct parser_exec_state *s,
return -EFAULT;
cmd = cmd_val(s, 0);
- info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
+ info = get_cmd_info(s->vgpu->gvt, cmd, s->engine);
if (info == NULL) {
- gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x, addr_type=%s, ring %d, workload=%p\n",
- cmd, get_opcode(cmd, s->ring_id),
- (s->buf_addr_type == PPGTT_BUFFER) ?
- "ppgtt" : "ggtt", s->ring_id, s->workload);
+ gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x, addr_type=%s, ring %s, workload=%p\n",
+ cmd, get_opcode(cmd, s->engine),
+ repr_addr_type(s->buf_addr_type),
+ s->engine->name, s->workload);
return -EBADRQC;
}
do {
if (copy_gma_to_hva(s->vgpu, mm,
gma, gma + 4, &cmd) < 0)
return -EFAULT;
- info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
+ info = get_cmd_info(s->vgpu->gvt, cmd, s->engine);
if (info == NULL) {
- gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x, addr_type=%s, ring %d, workload=%p\n",
- cmd, get_opcode(cmd, s->ring_id),
- (s->buf_addr_type == PPGTT_BUFFER) ?
- "ppgtt" : "ggtt", s->ring_id, s->workload);
+ gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x, addr_type=%s, ring %s, workload=%p\n",
+ cmd, get_opcode(cmd, s->engine),
+ repr_addr_type(s->buf_addr_type),
+ s->engine->name, s->workload);
return -EBADRQC;
}
@@ -1799,12 +1804,12 @@ static int audit_bb_end(struct parser_exec_state *s, void *va)
u32 cmd = *(u32 *)va;
const struct cmd_info *info;
- info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
+ info = get_cmd_info(s->vgpu->gvt, cmd, s->engine);
if (info == NULL) {
- gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x, addr_type=%s, ring %d, workload=%p\n",
- cmd, get_opcode(cmd, s->ring_id),
- (s->buf_addr_type == PPGTT_BUFFER) ?
- "ppgtt" : "ggtt", s->ring_id, s->workload);
+ gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x, addr_type=%s, ring %s, workload=%p\n",
+ cmd, get_opcode(cmd, s->engine),
+ repr_addr_type(s->buf_addr_type),
+ s->engine->name, s->workload);
return -EBADRQC;
}
@@ -1857,7 +1862,7 @@ static int perform_bb_shadow(struct parser_exec_state *s)
if (bb->ppgtt)
start_offset = gma & ~I915_GTT_PAGE_MASK;
- bb->obj = i915_gem_object_create_shmem(s->vgpu->gvt->dev_priv,
+ bb->obj = i915_gem_object_create_shmem(s->engine->i915,
round_up(bb_size + start_offset,
PAGE_SIZE));
if (IS_ERR(bb->obj)) {
@@ -2666,25 +2671,25 @@ static int cmd_parser_exec(struct parser_exec_state *s)
if (cmd == MI_NOOP)
info = &cmd_info[mi_noop_index];
else
- info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
+ info = get_cmd_info(s->vgpu->gvt, cmd, s->engine);
if (info == NULL) {
- gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x, addr_type=%s, ring %d, workload=%p\n",
- cmd, get_opcode(cmd, s->ring_id),
- (s->buf_addr_type == PPGTT_BUFFER) ?
- "ppgtt" : "ggtt", s->ring_id, s->workload);
+ gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x, addr_type=%s, ring %s, workload=%p\n",
+ cmd, get_opcode(cmd, s->engine),
+ repr_addr_type(s->buf_addr_type),
+ s->engine->name, s->workload);
return -EBADRQC;
}
s->info = info;
- trace_gvt_command(vgpu->id, s->ring_id, s->ip_gma, s->ip_va,
+ trace_gvt_command(vgpu->id, s->engine->id, s->ip_gma, s->ip_va,
cmd_length(s), s->buf_type, s->buf_addr_type,
s->workload, info->name);
if ((info->flag & F_LEN_MASK) == F_LEN_VAR_FIXED) {
ret = gvt_check_valid_cmd_length(cmd_length(s),
info->valid_len);
if (ret)
return ret;
}
@@ -2781,7 +2786,7 @@ static int scan_workload(struct intel_vgpu_workload *workload)
s.buf_type = RING_BUFFER_INSTRUCTION;
s.buf_addr_type = GTT_BUFFER;
s.vgpu = workload->vgpu;
- s.ring_id = workload->ring_id;
+ s.engine = workload->engine;
s.ring_start = workload->rb_start;
s.ring_size = _RING_CTL_BUF_SIZE(workload->rb_ctl);
s.ring_head = gma_head;
@@ -2790,8 +2795,7 @@ static int scan_workload(struct intel_vgpu_workload *workload)
s.workload = workload;
s.is_ctx_wa = false;
- if ((bypass_scan_mask & (1 << workload->ring_id)) ||
- gma_head == gma_tail)
+ if (bypass_scan_mask & workload->engine->mask || gma_head == gma_tail)
return 0;
ret = ip_gma_set(&s, gma_head);
@@ -2830,7 +2834,7 @@ static int scan_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
s.buf_type = RING_BUFFER_INSTRUCTION;
s.buf_addr_type = GTT_BUFFER;
s.vgpu = workload->vgpu;
- s.ring_id = workload->ring_id;
+ s.engine = workload->engine;
s.ring_start = wa_ctx->indirect_ctx.guest_gma;
s.ring_size = ring_size;
s.ring_head = gma_head;
@@ -2855,7 +2859,6 @@ static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
struct intel_vgpu_submission *s = &vgpu->submission;
unsigned long gma_head, gma_tail, gma_top, guest_rb_size;
void *shadow_ring_buffer_va;
- int ring_id = workload->ring_id;
int ret;
guest_rb_size = _RING_CTL_BUF_SIZE(workload->rb_ctl);
@@ -2868,21 +2871,21 @@ static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
gma_tail = workload->rb_start + workload->rb_tail;
gma_top = workload->rb_start + guest_rb_size;
- if (workload->rb_len > s->ring_scan_buffer_size[ring_id]) {
+ if (workload->rb_len > s->ring_scan_buffer_size[workload->engine->id]) {
void *p;
/* realloc the new ring buffer if needed */
- p = krealloc(s->ring_scan_buffer[ring_id], workload->rb_len,
- GFP_KERNEL);
+ p = krealloc(s->ring_scan_buffer[workload->engine->id],
+ workload->rb_len, GFP_KERNEL);
if (!p) {
gvt_vgpu_err("fail to re-alloc ring scan buffer\n");
return -ENOMEM;
}
- s->ring_scan_buffer[ring_id] = p;
+ s->ring_scan_buffer[workload->engine->id] = p;
- s->ring_scan_buffer_size[ring_id] = workload->rb_len;
+ s->ring_scan_buffer_size[workload->engine->id] = workload->rb_len;
}
- shadow_ring_buffer_va = s->ring_scan_buffer[ring_id];
+ shadow_ring_buffer_va = s->ring_scan_buffer[workload->engine->id];
/* get shadow ring buffer va */
workload->shadow_ring_buffer_va = shadow_ring_buffer_va;
@@ -2940,7 +2943,7 @@ static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx)
int ret = 0;
void *map;
- obj = i915_gem_object_create_shmem(workload->vgpu->gvt->dev_priv,
+ obj = i915_gem_object_create_shmem(workload->engine->i915,
roundup(ctx_size + CACHELINE_BYTES,
PAGE_SIZE));
if (IS_ERR(obj))
@@ -3029,30 +3032,14 @@ int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
return 0;
}
- static const struct cmd_info *find_cmd_entry_any_ring(struct intel_gvt *gvt,
- unsigned int opcode, unsigned long rings)
- {
- const struct cmd_info *info = NULL;
- unsigned int ring;
- for_each_set_bit(ring, &rings, I915_NUM_ENGINES) {
- info = find_cmd_entry(gvt, opcode, ring);
- if (info)
- break;
- }
- return info;
- }
static int init_cmd_table(struct intel_gvt *gvt)
{
+ unsigned int gen_type = intel_gvt_get_device_type(gvt);
int i;
- struct cmd_entry *e;
- const struct cmd_info *info;
- unsigned int gen_type;
- gen_type = intel_gvt_get_device_type(gvt);
for (i = 0; i < ARRAY_SIZE(cmd_info); i++) {
+ struct cmd_entry *e;
if (!(cmd_info[i].devices & gen_type))
continue;
@@ -3061,23 +3048,16 @@ static int init_cmd_table(struct intel_gvt *gvt)
return -ENOMEM;
e->info = &cmd_info[i];
- info = find_cmd_entry_any_ring(gvt,
- e->info->opcode, e->info->rings);
- if (info) {
- gvt_err("%s %s duplicated\n", e->info->name,
- info->name);
- kfree(e);
- return -EEXIST;
- }
if (cmd_info[i].opcode == OP_MI_NOOP)
mi_noop_index = i;
INIT_HLIST_NODE(&e->hlist);
add_cmd_entry(gvt, e);
gvt_dbg_cmd("add %-30s op %04x flag %x devs %02x rings %02x\n",
e->info->name, e->info->opcode, e->info->flag,
e->info->devices, e->info->rings);
}
return 0;
}
...
@@ -58,12 +58,11 @@ static int mmio_offset_compare(void *priv,
static inline int mmio_diff_handler(struct intel_gvt *gvt,
u32 offset, void *data)
{
- struct drm_i915_private *i915 = gvt->dev_priv;
struct mmio_diff_param *param = data;
struct diff_mmio *node;
u32 preg, vreg;
- preg = intel_uncore_read_notrace(&i915->uncore, _MMIO(offset));
+ preg = intel_uncore_read_notrace(gvt->gt->uncore, _MMIO(offset));
vreg = vgpu_vreg(param->vgpu, offset);
if (preg != vreg) {
@@ -98,10 +97,10 @@ static int vgpu_mmio_diff_show(struct seq_file *s, void *unused)
mutex_lock(&gvt->lock);
spin_lock_bh(&gvt->scheduler.mmio_context_lock);
- mmio_hw_access_pre(gvt->dev_priv);
+ mmio_hw_access_pre(gvt->gt);
/* Recognize all the diff mmios to list. */
intel_gvt_for_each_tracked_mmio(gvt, mmio_diff_handler, &param);
- mmio_hw_access_post(gvt->dev_priv);
+ mmio_hw_access_post(gvt->gt);
spin_unlock_bh(&gvt->scheduler.mmio_context_lock);
mutex_unlock(&gvt->lock);
@@ -128,6 +127,7 @@ static int
vgpu_scan_nonprivbb_get(void *data, u64 *val)
{
struct intel_vgpu *vgpu = (struct intel_vgpu *)data;
*val = vgpu->scan_nonprivbb;
return 0;
}
@@ -142,42 +142,7 @@ static int
vgpu_scan_nonprivbb_set(void *data, u64 val)
{
struct intel_vgpu *vgpu = (struct intel_vgpu *)data;
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
- enum intel_engine_id id;
- char buf[128], *s;
- int len;
- val &= (1 << I915_NUM_ENGINES) - 1;
- if (vgpu->scan_nonprivbb == val)
- return 0;
- if (!val)
- goto done;
- len = sprintf(buf,
- "gvt: vgpu %d turns on non-privileged batch buffers scanning on Engines:",
- vgpu->id);
- s = buf + len;
- for (id = 0; id < I915_NUM_ENGINES; id++) {
- struct intel_engine_cs *engine;
- engine = dev_priv->engine[id];
- if (engine && (val & (1 << id))) {
- len = snprintf(s, 4, "%d, ", engine->id);
- s += len;
- } else
- val &= ~(1 << id);
- }
- if (val)
- sprintf(s, "low performance expected.");
- pr_warn("%s\n", buf);
- done:
vgpu->scan_nonprivbb = val;
return 0;
}
@@ -220,7 +185,7 @@ void intel_gvt_debugfs_remove_vgpu(struct intel_vgpu *vgpu)
 */
void intel_gvt_debugfs_init(struct intel_gvt *gvt)
{
- struct drm_minor *minor = gvt->dev_priv->drm.primary;
+ struct drm_minor *minor = gvt->gt->i915->drm.primary;
gvt->debugfs_root = debugfs_create_dir("gvt", minor->debugfs_root);
...
@@ -57,7 +57,7 @@ static int get_edp_pipe(struct intel_vgpu *vgpu)
static int edp_pipe_is_enabled(struct intel_vgpu *vgpu)
{
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
if (!(vgpu_vreg_t(vgpu, PIPECONF(_PIPE_EDP)) & PIPECONF_ENABLE))
return 0;
@@ -69,7 +69,7 @@ static int edp_pipe_is_enabled(struct intel_vgpu *vgpu)
int pipe_is_enabled(struct intel_vgpu *vgpu, int pipe)
{
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
if (drm_WARN_ON(&dev_priv->drm,
pipe < PIPE_A || pipe >= I915_MAX_PIPES))
@@ -169,7 +169,7 @@ static u8 dpcd_fix_data[DPCD_HEADER_SIZE] = {
static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
{
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
int pipe;
if (IS_BROXTON(dev_priv)) {
@@ -320,7 +320,7 @@ static void clean_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num)
static int setup_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num,
int type, unsigned int resolution)
{
- struct drm_i915_private *i915 = vgpu->gvt->dev_priv;
+ struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
struct intel_vgpu_port *port = intel_vgpu_port(vgpu, port_num);
if (drm_WARN_ON(&i915->drm, resolution >= GVT_EDID_NUM))
@@ -391,7 +391,7 @@ void intel_gvt_check_vblank_emulation(struct intel_gvt *gvt)
static void emulate_vblank_on_pipe(struct intel_vgpu *vgpu, int pipe)
{
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
struct intel_vgpu_irq *irq = &vgpu->irq;
int vblank_event[] = {
[PIPE_A] = PIPE_A_VBLANK,
@@ -423,7 +423,7 @@ static void emulate_vblank(struct intel_vgpu *vgpu)
int pipe;
mutex_lock(&vgpu->vgpu_lock);
- for_each_pipe(vgpu->gvt->dev_priv, pipe)
+ for_each_pipe(vgpu->gvt->gt->i915, pipe)
emulate_vblank_on_pipe(vgpu, pipe);
mutex_unlock(&vgpu->vgpu_lock);
}
@@ -456,10 +456,11 @@ void intel_gvt_emulate_vblank(struct intel_gvt *gvt)
 */
void intel_vgpu_emulate_hotplug(struct intel_vgpu *vgpu, bool connected)
{
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
/* TODO: add more platforms support */
- if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+ if (IS_SKYLAKE(i915) || IS_KABYLAKE(i915) ||
+ IS_COFFEELAKE(i915)) {
if (connected) {
vgpu_vreg_t(vgpu, SFUSE_STRAP) |=
SFUSE_STRAP_DDID_DETECTED;
@@ -485,7 +486,7 @@ void intel_vgpu_emulate_hotplug(struct intel_vgpu *vgpu, bool connected)
 */
void intel_vgpu_clean_display(struct intel_vgpu *vgpu)
{
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) ||
IS_COFFEELAKE(dev_priv))
@@ -507,7 +508,7 @@ void intel_vgpu_clean_display(struct intel_vgpu *vgpu)
 */
int intel_vgpu_init_display(struct intel_vgpu *vgpu, u64 resolution)
{
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
intel_vgpu_init_i2c_edid(vgpu);
...
@@ -417,7 +417,7 @@ static void update_fb_info(struct vfio_device_gfx_plane_info *gvt_dmabuf,
int intel_vgpu_query_plane(struct intel_vgpu *vgpu, void *args)
{
- struct drm_device *dev = &vgpu->gvt->dev_priv->drm;
+ struct drm_device *dev = &vgpu->gvt->gt->i915->drm;
struct vfio_device_gfx_plane_info *gfx_plane_info = args;
struct intel_vgpu_dmabuf_obj *dmabuf_obj;
struct intel_vgpu_fb_info fb_info;
@@ -523,7 +523,7 @@ int intel_vgpu_query_plane(struct intel_vgpu *vgpu, void *args)
/* To associate an exposed dmabuf with the dmabuf_obj */
int intel_vgpu_get_dmabuf(struct intel_vgpu *vgpu, unsigned int dmabuf_id)
{
- struct drm_device *dev = &vgpu->gvt->dev_priv->drm;
+ struct drm_device *dev = &vgpu->gvt->gt->i915->drm;
struct intel_vgpu_dmabuf_obj *dmabuf_obj;
struct drm_i915_gem_object *obj;
struct dma_buf *dmabuf;
...
@@ -135,7 +135,7 @@ static void reset_gmbus_controller(struct intel_vgpu *vgpu)
static int gmbus0_mmio_write(struct intel_vgpu *vgpu,
unsigned int offset, void *p_data, unsigned int bytes)
{
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
int port, pin_select;
memcpy(&vgpu_vreg(vgpu, offset), p_data, bytes);
@@ -147,13 +147,13 @@ static int gmbus0_mmio_write(struct intel_vgpu *vgpu,
if (pin_select == 0)
return 0;
- if (IS_BROXTON(dev_priv))
+ if (IS_BROXTON(i915))
port = bxt_get_port_from_gmbus0(pin_select);
- else if (IS_COFFEELAKE(dev_priv))
+ else if (IS_COFFEELAKE(i915))
port = cnp_get_port_from_gmbus0(pin_select);
else
port = get_port_from_gmbus0(pin_select);
- if (drm_WARN_ON(&dev_priv->drm, port < 0))
+ if (drm_WARN_ON(&i915->drm, port < 0))
return 0;
vgpu->display.i2c_edid.state = I2C_GMBUS;
@@ -276,7 +276,7 @@ static int gmbus1_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
static int gmbus3_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
- struct drm_i915_private *i915 = vgpu->gvt->dev_priv;
+ struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
drm_WARN_ON(&i915->drm, 1);
return 0;
@@ -373,7 +373,7 @@ static int gmbus2_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
int intel_gvt_i2c_handle_gmbus_read(struct intel_vgpu *vgpu,
unsigned int offset, void *p_data, unsigned int bytes)
{
- struct drm_i915_private *i915 = vgpu->gvt->dev_priv;
+ struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
if (drm_WARN_ON(&i915->drm, bytes > 8 && (offset & (bytes - 1))))
return -EINVAL;
@@ -403,7 +403,7 @@ int intel_gvt_i2c_handle_gmbus_read(struct intel_vgpu *vgpu,
int intel_gvt_i2c_handle_gmbus_write(struct intel_vgpu *vgpu,
unsigned int offset, void *p_data, unsigned int bytes)
{
- struct drm_i915_private *i915 = vgpu->gvt->dev_priv;
+ struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
if (drm_WARN_ON(&i915->drm, bytes > 8 && (offset & (bytes - 1))))
return -EINVAL;
@@ -479,7 +479,7 @@ void intel_gvt_i2c_handle_aux_ch_write(struct intel_vgpu *vgpu,
unsigned int offset,
void *p_data)
{
- struct drm_i915_private *i915 = vgpu->gvt->dev_priv;
+ struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
struct intel_vgpu_i2c_edid *i2c_edid = &vgpu->display.i2c_edid;
int msg_length, ret_msg_size;
int msg, addr, ctrl, op;
...
@@ -39,8 +39,7 @@
#define _EL_OFFSET_STATUS_BUF 0x370
#define _EL_OFFSET_STATUS_PTR 0x3A0
- #define execlist_ring_mmio(gvt, ring_id, offset) \
- (gvt->dev_priv->engine[ring_id]->mmio_base + (offset))
+ #define execlist_ring_mmio(e, offset) ((e)->mmio_base + (offset))
#define valid_context(ctx) ((ctx)->valid)
#define same_context(a, b) (((a)->context_id == (b)->context_id) && \
@@ -54,12 +53,12 @@ static int context_switch_events[] = {
[VECS0] = VECS_AS_CONTEXT_SWITCH,
};
- static int ring_id_to_context_switch_event(unsigned int ring_id)
+ static int to_context_switch_event(const struct intel_engine_cs *engine)
{
- if (WARN_ON(ring_id >= ARRAY_SIZE(context_switch_events)))
+ if (WARN_ON(engine->id >= ARRAY_SIZE(context_switch_events)))
return -EINVAL;
- return context_switch_events[ring_id];
+ return context_switch_events[engine->id];
}
static void switch_virtual_execlist_slot(struct intel_vgpu_execlist *execlist)
@@ -93,9 +92,8 @@ static void emulate_execlist_status(struct intel_vgpu_execlist *execlist)
struct execlist_ctx_descriptor_format *desc = execlist->running_context;
struct intel_vgpu *vgpu = execlist->vgpu;
struct execlist_status_format status;
- int ring_id = execlist->ring_id;
- u32 status_reg = execlist_ring_mmio(vgpu->gvt,
- ring_id, _EL_OFFSET_STATUS);
+ u32 status_reg =
+ execlist_ring_mmio(execlist->engine, _EL_OFFSET_STATUS);
status.ldw = vgpu_vreg(vgpu, status_reg);
status.udw = vgpu_vreg(vgpu, status_reg + 4);
@@ -124,21 +122,19 @@ static void emulate_execlist_status(struct intel_vgpu_execlist *execlist)
}
static void emulate_csb_update(struct intel_vgpu_execlist *execlist,
struct execlist_context_status_format *status,
bool trigger_interrupt_later)
{
struct intel_vgpu *vgpu = execlist->vgpu;
- int ring_id = execlist->ring_id;
struct execlist_context_status_pointer_format ctx_status_ptr;
u32 write_pointer;
u32 ctx_status_ptr_reg, ctx_status_buf_reg, offset;
unsigned long hwsp_gpa;
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
- ctx_status_ptr_reg = execlist_ring_mmio(vgpu->gvt, ring_id,
- _EL_OFFSET_STATUS_PTR);
+ ctx_status_ptr_reg =
+ execlist_ring_mmio(execlist->engine, _EL_OFFSET_STATUS_PTR);
- ctx_status_buf_reg = execlist_ring_mmio(vgpu->gvt, ring_id,
- _EL_OFFSET_STATUS_BUF);
+ ctx_status_buf_reg =
+ execlist_ring_mmio(execlist->engine, _EL_OFFSET_STATUS_BUF);
ctx_status_ptr.dw = vgpu_vreg(vgpu, ctx_status_ptr_reg);
@@ -161,26 +157,24 @@ static void emulate_csb_update(struct intel_vgpu_execlist *execlist,
/* Update the CSB and CSB write pointer in HWSP */
hwsp_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
- vgpu->hws_pga[ring_id]);
+ vgpu->hws_pga[execlist->engine->id]);
if (hwsp_gpa != INTEL_GVT_INVALID_ADDR) {
intel_gvt_hypervisor_write_gpa(vgpu,
- hwsp_gpa + I915_HWS_CSB_BUF0_INDEX * 4 +
- write_pointer * 8,
- status, 8);
+ hwsp_gpa + I915_HWS_CSB_BUF0_INDEX * 4 + write_pointer * 8,
+ status, 8);
intel_gvt_hypervisor_write_gpa(vgpu,
- hwsp_gpa +
- intel_hws_csb_write_index(dev_priv) * 4,
- &write_pointer, 4);
+ hwsp_gpa + intel_hws_csb_write_index(execlist->engine->i915) * 4,
+ &write_pointer, 4);
}
gvt_dbg_el("vgpu%d: w pointer %u reg %x csb l %x csb h %x\n",
vgpu->id, write_pointer, offset, status->ldw, status->udw);
if (trigger_interrupt_later)
return;
intel_vgpu_trigger_virtual_event(vgpu,
- ring_id_to_context_switch_event(execlist->ring_id));
+ to_context_switch_event(execlist->engine));
}
static int emulate_execlist_ctx_schedule_out(
@@ -261,9 +255,8 @@ static struct intel_vgpu_execlist_slot *get_next_execlist_slot(
struct intel_vgpu_execlist *execlist)
{
struct intel_vgpu *vgpu = execlist->vgpu;
- int ring_id = execlist->ring_id;
- u32 status_reg = execlist_ring_mmio(vgpu->gvt, ring_id,
- _EL_OFFSET_STATUS);
+ u32 status_reg =
+ execlist_ring_mmio(execlist->engine, _EL_OFFSET_STATUS);
struct execlist_status_format status;
status.ldw = vgpu_vreg(vgpu, status_reg);
@@ -379,7 +372,6 @@ static int prepare_execlist_workload(struct intel_vgpu_workload *workload)
struct intel_vgpu *vgpu = workload->vgpu;
struct intel_vgpu_submission *s = &vgpu->submission;
struct execlist_ctx_descriptor_format ctx[2];
- int ring_id = workload->ring_id;
int ret;
if (!workload->emulate_schedule_in)
@@ -388,7 +380,8 @@ static int prepare_execlist_workload(struct intel_vgpu_workload *workload)
ctx[0] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 0);
ctx[1] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 1);
- ret = emulate_execlist_schedule_in(&s->execlist[ring_id], ctx);
+ ret = emulate_execlist_schedule_in(&s->execlist[workload->engine->id],
+ ctx);
if (ret) {
gvt_vgpu_err("fail to emulate execlist schedule in\n");
return ret;
@@ -399,21 +392,21 @@ static int prepare_execlist_workload(struct intel_vgpu_workload *workload)
static int complete_execlist_workload(struct intel_vgpu_workload *workload)
{
struct intel_vgpu *vgpu = workload->vgpu;
- int ring_id = workload->ring_id;
struct intel_vgpu_submission *s = &vgpu->submission;
- struct intel_vgpu_execlist *execlist = &s->execlist[ring_id];
+ struct intel_vgpu_execlist *execlist =
+ &s->execlist[workload->engine->id];
struct intel_vgpu_workload *next_workload;
- struct list_head *next = workload_q_head(vgpu, ring_id)->next;
+ struct list_head *next = workload_q_head(vgpu, workload->engine)->next;
bool lite_restore = false;
int ret = 0;
- gvt_dbg_el("complete workload %p status %d\n", workload,
- workload->status);
+ gvt_dbg_el("complete workload %p status %d\n",
+ workload, workload->status);
- if (workload->status || (vgpu->resetting_eng & BIT(ring_id)))
+ if (workload->status || vgpu->resetting_eng & workload->engine->mask)
goto out;
- if (!list_empty(workload_q_head(vgpu, ring_id))) {
+ if (!list_empty(workload_q_head(vgpu, workload->engine))) {
struct execlist_ctx_descriptor_format *this_desc, *next_desc;
next_workload = container_of(next,
@@ -436,14 +429,15 @@ static int complete_execlist_workload(struct intel_vgpu_workload *workload)
return ret;
}
- static int submit_context(struct intel_vgpu *vgpu, int ring_id,
- struct execlist_ctx_descriptor_format *desc,
- bool emulate_schedule_in)
+ static int submit_context(struct intel_vgpu *vgpu,
+ const struct intel_engine_cs *engine,
+ struct execlist_ctx_descriptor_format *desc,
+ bool emulate_schedule_in)
{
struct intel_vgpu_submission *s = &vgpu->submission;
struct intel_vgpu_workload *workload = NULL;
- workload = intel_vgpu_create_workload(vgpu, ring_id, desc);
+ workload = intel_vgpu_create_workload(vgpu, engine, desc);
if (IS_ERR(workload))
return PTR_ERR(workload); return PTR_ERR(workload);
...@@ -452,19 +446,20 @@ static int submit_context(struct intel_vgpu *vgpu, int ring_id, ...@@ -452,19 +446,20 @@ static int submit_context(struct intel_vgpu *vgpu, int ring_id,
workload->emulate_schedule_in = emulate_schedule_in; workload->emulate_schedule_in = emulate_schedule_in;
if (emulate_schedule_in) if (emulate_schedule_in)
workload->elsp_dwords = s->execlist[ring_id].elsp_dwords; workload->elsp_dwords = s->execlist[engine->id].elsp_dwords;
gvt_dbg_el("workload %p emulate schedule_in %d\n", workload, gvt_dbg_el("workload %p emulate schedule_in %d\n", workload,
emulate_schedule_in); emulate_schedule_in);
intel_vgpu_queue_workload(workload); intel_vgpu_queue_workload(workload);
return 0; return 0;
} }
int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id) int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu,
const struct intel_engine_cs *engine)
{ {
struct intel_vgpu_submission *s = &vgpu->submission; struct intel_vgpu_submission *s = &vgpu->submission;
struct intel_vgpu_execlist *execlist = &s->execlist[ring_id]; struct intel_vgpu_execlist *execlist = &s->execlist[engine->id];
struct execlist_ctx_descriptor_format *desc[2]; struct execlist_ctx_descriptor_format *desc[2];
int i, ret; int i, ret;
...@@ -489,7 +484,7 @@ int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id) ...@@ -489,7 +484,7 @@ int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id)
for (i = 0; i < ARRAY_SIZE(desc); i++) { for (i = 0; i < ARRAY_SIZE(desc); i++) {
if (!desc[i]->valid) if (!desc[i]->valid)
continue; continue;
ret = submit_context(vgpu, ring_id, desc[i], i == 0); ret = submit_context(vgpu, engine, desc[i], i == 0);
if (ret) { if (ret) {
gvt_vgpu_err("failed to submit desc %d\n", i); gvt_vgpu_err("failed to submit desc %d\n", i);
return ret; return ret;
...@@ -504,22 +499,22 @@ int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id) ...@@ -504,22 +499,22 @@ int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id)
return -EINVAL; return -EINVAL;
} }
static void init_vgpu_execlist(struct intel_vgpu *vgpu, int ring_id) static void init_vgpu_execlist(struct intel_vgpu *vgpu,
const struct intel_engine_cs *engine)
{ {
struct intel_vgpu_submission *s = &vgpu->submission; struct intel_vgpu_submission *s = &vgpu->submission;
struct intel_vgpu_execlist *execlist = &s->execlist[ring_id]; struct intel_vgpu_execlist *execlist = &s->execlist[engine->id];
struct execlist_context_status_pointer_format ctx_status_ptr; struct execlist_context_status_pointer_format ctx_status_ptr;
u32 ctx_status_ptr_reg; u32 ctx_status_ptr_reg;
memset(execlist, 0, sizeof(*execlist)); memset(execlist, 0, sizeof(*execlist));
execlist->vgpu = vgpu; execlist->vgpu = vgpu;
execlist->ring_id = ring_id; execlist->engine = engine;
execlist->slot[0].index = 0; execlist->slot[0].index = 0;
execlist->slot[1].index = 1; execlist->slot[1].index = 1;
ctx_status_ptr_reg = execlist_ring_mmio(vgpu->gvt, ring_id, ctx_status_ptr_reg = execlist_ring_mmio(engine, _EL_OFFSET_STATUS_PTR);
_EL_OFFSET_STATUS_PTR);
ctx_status_ptr.dw = vgpu_vreg(vgpu, ctx_status_ptr_reg); ctx_status_ptr.dw = vgpu_vreg(vgpu, ctx_status_ptr_reg);
ctx_status_ptr.read_ptr = 0; ctx_status_ptr.read_ptr = 0;
ctx_status_ptr.write_ptr = 0x7; ctx_status_ptr.write_ptr = 0x7;
...@@ -529,7 +524,7 @@ static void init_vgpu_execlist(struct intel_vgpu *vgpu, int ring_id) ...@@ -529,7 +524,7 @@ static void init_vgpu_execlist(struct intel_vgpu *vgpu, int ring_id)
static void clean_execlist(struct intel_vgpu *vgpu, static void clean_execlist(struct intel_vgpu *vgpu,
intel_engine_mask_t engine_mask) intel_engine_mask_t engine_mask)
{ {
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
struct intel_engine_cs *engine; struct intel_engine_cs *engine;
struct intel_vgpu_submission *s = &vgpu->submission; struct intel_vgpu_submission *s = &vgpu->submission;
intel_engine_mask_t tmp; intel_engine_mask_t tmp;
...@@ -544,12 +539,12 @@ static void clean_execlist(struct intel_vgpu *vgpu, ...@@ -544,12 +539,12 @@ static void clean_execlist(struct intel_vgpu *vgpu,
static void reset_execlist(struct intel_vgpu *vgpu, static void reset_execlist(struct intel_vgpu *vgpu,
intel_engine_mask_t engine_mask) intel_engine_mask_t engine_mask)
{ {
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
struct intel_engine_cs *engine; struct intel_engine_cs *engine;
intel_engine_mask_t tmp; intel_engine_mask_t tmp;
for_each_engine_masked(engine, &dev_priv->gt, engine_mask, tmp) for_each_engine_masked(engine, &dev_priv->gt, engine_mask, tmp)
init_vgpu_execlist(vgpu, engine->id); init_vgpu_execlist(vgpu, engine);
} }
static int init_execlist(struct intel_vgpu *vgpu, static int init_execlist(struct intel_vgpu *vgpu,
......
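The execlist.c hunks above all make the same change: the per-ring integer id is dropped and the execlist state carries a pointer to its intel_engine_cs, so MMIO offsets, HWSP slots and reset masks are derived from the engine itself (engine->id, engine->mask, engine->mmio_base). A small standalone sketch of that pattern, using mock types in place of the kernel structures (mock_engine, vgpu_execlist and NUM_ENGINES are invented for illustration):

#include <stdio.h>

#define NUM_ENGINES 4

/* Minimal stand-in for struct intel_engine_cs. */
struct mock_engine {
	unsigned int id;          /* index into per-engine arrays, replaces ring_id */
	unsigned int mask;        /* BIT(id), tested against reset/engine masks */
	unsigned int mmio_base;   /* base of the engine's register page */
	const char *name;
};

/* Stand-in for struct intel_vgpu_execlist after the refactor. */
struct vgpu_execlist {
	const struct mock_engine *engine;   /* was: int ring_id */
};

/* Was execlist_ring_mmio(gvt, ring_id, offset); now derived from the engine. */
static unsigned int execlist_mmio(const struct mock_engine *engine, unsigned int offset)
{
	return engine->mmio_base + offset;
}

int main(void)
{
	struct mock_engine rcs0 = { .id = 0, .mask = 1u << 0, .mmio_base = 0x2000, .name = "rcs0" };
	struct vgpu_execlist execlists[NUM_ENGINES] = { { .engine = &rcs0 } };
	unsigned int resetting_eng = 0;

	/* Per-engine state is reached through engine->id instead of a ring id. */
	const struct vgpu_execlist *el = &execlists[rcs0.id];

	printf("status reg %#x on %s, in reset: %d\n",
	       execlist_mmio(el->engine, 0x234), el->engine->name,
	       !!(resetting_eng & el->engine->mask));
	return 0;
}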
...@@ -170,16 +170,17 @@ struct intel_vgpu_execlist { ...@@ -170,16 +170,17 @@ struct intel_vgpu_execlist {
struct intel_vgpu_execlist_slot *running_slot; struct intel_vgpu_execlist_slot *running_slot;
struct intel_vgpu_execlist_slot *pending_slot; struct intel_vgpu_execlist_slot *pending_slot;
struct execlist_ctx_descriptor_format *running_context; struct execlist_ctx_descriptor_format *running_context;
int ring_id;
struct intel_vgpu *vgpu; struct intel_vgpu *vgpu;
struct intel_vgpu_elsp_dwords elsp_dwords; struct intel_vgpu_elsp_dwords elsp_dwords;
const struct intel_engine_cs *engine;
}; };
void intel_vgpu_clean_execlist(struct intel_vgpu *vgpu); void intel_vgpu_clean_execlist(struct intel_vgpu *vgpu);
int intel_vgpu_init_execlist(struct intel_vgpu *vgpu); int intel_vgpu_init_execlist(struct intel_vgpu *vgpu);
int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id); int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu,
const struct intel_engine_cs *engine);
void intel_vgpu_reset_execlist(struct intel_vgpu *vgpu, void intel_vgpu_reset_execlist(struct intel_vgpu *vgpu,
intel_engine_mask_t engine_mask); intel_engine_mask_t engine_mask);
......
...@@ -146,7 +146,7 @@ static int skl_format_to_drm(int format, bool rgb_order, bool alpha, ...@@ -146,7 +146,7 @@ static int skl_format_to_drm(int format, bool rgb_order, bool alpha,
static u32 intel_vgpu_get_stride(struct intel_vgpu *vgpu, int pipe, static u32 intel_vgpu_get_stride(struct intel_vgpu *vgpu, int pipe,
u32 tiled, int stride_mask, int bpp) u32 tiled, int stride_mask, int bpp)
{ {
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
u32 stride_reg = vgpu_vreg_t(vgpu, DSPSTRIDE(pipe)) & stride_mask; u32 stride_reg = vgpu_vreg_t(vgpu, DSPSTRIDE(pipe)) & stride_mask;
u32 stride = stride_reg; u32 stride = stride_reg;
...@@ -202,8 +202,8 @@ static int get_active_pipe(struct intel_vgpu *vgpu) ...@@ -202,8 +202,8 @@ static int get_active_pipe(struct intel_vgpu *vgpu)
int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu, int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
struct intel_vgpu_primary_plane_format *plane) struct intel_vgpu_primary_plane_format *plane)
{ {
struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
u32 val, fmt; u32 val, fmt;
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
int pipe; int pipe;
pipe = get_active_pipe(vgpu); pipe = get_active_pipe(vgpu);
...@@ -332,9 +332,9 @@ static int cursor_mode_to_drm(int mode) ...@@ -332,9 +332,9 @@ static int cursor_mode_to_drm(int mode)
int intel_vgpu_decode_cursor_plane(struct intel_vgpu *vgpu, int intel_vgpu_decode_cursor_plane(struct intel_vgpu *vgpu,
struct intel_vgpu_cursor_plane_format *plane) struct intel_vgpu_cursor_plane_format *plane)
{ {
struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
u32 val, mode, index; u32 val, mode, index;
u32 alpha_plane, alpha_force; u32 alpha_plane, alpha_force;
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
int pipe; int pipe;
pipe = get_active_pipe(vgpu); pipe = get_active_pipe(vgpu);
......
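The fb_decoder.c changes are purely about how the i915 device is reached: the cached dev_priv pointer is gone and the code walks vgpu->gvt->gt->i915 instead. A toy sketch of that containment chain (all type and field names here are stand-ins, not the real kernel definitions):

#include <stdio.h>

struct mock_i915 { const char *name; };

struct mock_gt   { struct mock_i915 *i915; };   /* like struct intel_gt */

struct mock_gvt  { struct mock_gt *gt; };       /* dev_priv field removed */

struct mock_vgpu { struct mock_gvt *gvt; };

int main(void)
{
	struct mock_i915 i915 = { .name = "i915" };
	struct mock_gt gt = { .i915 = &i915 };
	struct mock_gvt gvt = { .gt = &gt };
	struct mock_vgpu vgpu = { .gvt = &gvt };

	/* Old: vgpu->gvt->dev_priv; new: one extra hop through the GT. */
	printf("device: %s\n", vgpu.gvt->gt->i915->name);
	return 0;
}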
...@@ -68,9 +68,7 @@ static struct bin_attribute firmware_attr = { ...@@ -68,9 +68,7 @@ static struct bin_attribute firmware_attr = {
static int mmio_snapshot_handler(struct intel_gvt *gvt, u32 offset, void *data) static int mmio_snapshot_handler(struct intel_gvt *gvt, u32 offset, void *data)
{ {
struct drm_i915_private *i915 = gvt->dev_priv; *(u32 *)(data + offset) = intel_uncore_read_notrace(gvt->gt->uncore,
*(u32 *)(data + offset) = intel_uncore_read_notrace(&i915->uncore,
_MMIO(offset)); _MMIO(offset));
return 0; return 0;
} }
...@@ -78,7 +76,7 @@ static int mmio_snapshot_handler(struct intel_gvt *gvt, u32 offset, void *data) ...@@ -78,7 +76,7 @@ static int mmio_snapshot_handler(struct intel_gvt *gvt, u32 offset, void *data)
static int expose_firmware_sysfs(struct intel_gvt *gvt) static int expose_firmware_sysfs(struct intel_gvt *gvt)
{ {
struct intel_gvt_device_info *info = &gvt->device_info; struct intel_gvt_device_info *info = &gvt->device_info;
struct pci_dev *pdev = gvt->dev_priv->drm.pdev; struct pci_dev *pdev = gvt->gt->i915->drm.pdev;
struct gvt_firmware_header *h; struct gvt_firmware_header *h;
void *firmware; void *firmware;
void *p; void *p;
...@@ -129,7 +127,7 @@ static int expose_firmware_sysfs(struct intel_gvt *gvt) ...@@ -129,7 +127,7 @@ static int expose_firmware_sysfs(struct intel_gvt *gvt)
static void clean_firmware_sysfs(struct intel_gvt *gvt) static void clean_firmware_sysfs(struct intel_gvt *gvt)
{ {
struct pci_dev *pdev = gvt->dev_priv->drm.pdev; struct pci_dev *pdev = gvt->gt->i915->drm.pdev;
device_remove_bin_file(&pdev->dev, &firmware_attr); device_remove_bin_file(&pdev->dev, &firmware_attr);
vfree(firmware_attr.private); vfree(firmware_attr.private);
...@@ -153,8 +151,7 @@ static int verify_firmware(struct intel_gvt *gvt, ...@@ -153,8 +151,7 @@ static int verify_firmware(struct intel_gvt *gvt,
const struct firmware *fw) const struct firmware *fw)
{ {
struct intel_gvt_device_info *info = &gvt->device_info; struct intel_gvt_device_info *info = &gvt->device_info;
struct drm_i915_private *dev_priv = gvt->dev_priv; struct pci_dev *pdev = gvt->gt->i915->drm.pdev;
struct pci_dev *pdev = dev_priv->drm.pdev;
struct gvt_firmware_header *h; struct gvt_firmware_header *h;
unsigned long id, crc32_start; unsigned long id, crc32_start;
const void *mem; const void *mem;
...@@ -208,8 +205,7 @@ static int verify_firmware(struct intel_gvt *gvt, ...@@ -208,8 +205,7 @@ static int verify_firmware(struct intel_gvt *gvt,
int intel_gvt_load_firmware(struct intel_gvt *gvt) int intel_gvt_load_firmware(struct intel_gvt *gvt)
{ {
struct intel_gvt_device_info *info = &gvt->device_info; struct intel_gvt_device_info *info = &gvt->device_info;
struct drm_i915_private *dev_priv = gvt->dev_priv; struct pci_dev *pdev = gvt->gt->i915->drm.pdev;
struct pci_dev *pdev = dev_priv->drm.pdev;
struct intel_gvt_firmware *firmware = &gvt->firmware; struct intel_gvt_firmware *firmware = &gvt->firmware;
struct gvt_firmware_header *h; struct gvt_firmware_header *h;
const struct firmware *fw; const struct firmware *fw;
...@@ -244,7 +240,7 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt) ...@@ -244,7 +240,7 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt)
gvt_dbg_core("request hw state firmware %s...\n", path); gvt_dbg_core("request hw state firmware %s...\n", path);
ret = request_firmware(&fw, path, &dev_priv->drm.pdev->dev); ret = request_firmware(&fw, path, &gvt->gt->i915->drm.pdev->dev);
kfree(path); kfree(path);
if (ret) if (ret)
......
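firmware.c keeps the snapshot logic unchanged; it just reads registers through the GT's uncore (intel_uncore_read_notrace(gvt->gt->uncore, ...)) and finds the PCI device via gvt->gt->i915->drm.pdev. The handler itself stores each register value at its own offset in a flat buffer; a standalone sketch of that pattern (read_reg and the 16-byte MMIO size are invented for illustration):

#include <stdint.h>
#include <stdio.h>

#define MMIO_SIZE 16   /* invented; the real MMIO space is far larger */

/* Stand-in for intel_uncore_read_notrace(): return a fake register value. */
static uint32_t read_reg(uint32_t offset)
{
	return 0xdead0000u | offset;
}

/* Mirrors mmio_snapshot_handler(): store the value at its own offset. */
static int snapshot_handler(uint32_t offset, void *data)
{
	*(uint32_t *)((uint8_t *)data + offset) = read_reg(offset);
	return 0;
}

int main(void)
{
	uint32_t snapshot[MMIO_SIZE / 4] = { 0 };

	for (uint32_t offset = 0; offset < MMIO_SIZE; offset += 4)
		snapshot_handler(offset, snapshot);

	for (uint32_t offset = 0; offset < MMIO_SIZE; offset += 4)
		printf("0x%02x: 0x%08x\n", offset, snapshot[offset / 4]);
	return 0;
}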
...@@ -71,7 +71,7 @@ bool intel_gvt_ggtt_validate_range(struct intel_vgpu *vgpu, u64 addr, u32 size) ...@@ -71,7 +71,7 @@ bool intel_gvt_ggtt_validate_range(struct intel_vgpu *vgpu, u64 addr, u32 size)
/* translate a guest gmadr to host gmadr */ /* translate a guest gmadr to host gmadr */
int intel_gvt_ggtt_gmadr_g2h(struct intel_vgpu *vgpu, u64 g_addr, u64 *h_addr) int intel_gvt_ggtt_gmadr_g2h(struct intel_vgpu *vgpu, u64 g_addr, u64 *h_addr)
{ {
struct drm_i915_private *i915 = vgpu->gvt->dev_priv; struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
if (drm_WARN(&i915->drm, !vgpu_gmadr_is_valid(vgpu, g_addr), if (drm_WARN(&i915->drm, !vgpu_gmadr_is_valid(vgpu, g_addr),
"invalid guest gmadr %llx\n", g_addr)) "invalid guest gmadr %llx\n", g_addr))
...@@ -89,7 +89,7 @@ int intel_gvt_ggtt_gmadr_g2h(struct intel_vgpu *vgpu, u64 g_addr, u64 *h_addr) ...@@ -89,7 +89,7 @@ int intel_gvt_ggtt_gmadr_g2h(struct intel_vgpu *vgpu, u64 g_addr, u64 *h_addr)
/* translate a host gmadr to guest gmadr */ /* translate a host gmadr to guest gmadr */
int intel_gvt_ggtt_gmadr_h2g(struct intel_vgpu *vgpu, u64 h_addr, u64 *g_addr) int intel_gvt_ggtt_gmadr_h2g(struct intel_vgpu *vgpu, u64 h_addr, u64 *g_addr)
{ {
struct drm_i915_private *i915 = vgpu->gvt->dev_priv; struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
if (drm_WARN(&i915->drm, !gvt_gmadr_is_valid(vgpu->gvt, h_addr), if (drm_WARN(&i915->drm, !gvt_gmadr_is_valid(vgpu->gvt, h_addr),
"invalid host gmadr %llx\n", h_addr)) "invalid host gmadr %llx\n", h_addr))
...@@ -279,24 +279,23 @@ static inline int get_pse_type(int type) ...@@ -279,24 +279,23 @@ static inline int get_pse_type(int type)
return gtt_type_table[type].pse_entry_type; return gtt_type_table[type].pse_entry_type;
} }
static u64 read_pte64(struct drm_i915_private *dev_priv, unsigned long index) static u64 read_pte64(struct i915_ggtt *ggtt, unsigned long index)
{ {
void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index; void __iomem *addr = (gen8_pte_t __iomem *)ggtt->gsm + index;
return readq(addr); return readq(addr);
} }
static void ggtt_invalidate(struct drm_i915_private *dev_priv) static void ggtt_invalidate(struct intel_gt *gt)
{ {
mmio_hw_access_pre(dev_priv); mmio_hw_access_pre(gt);
I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN); intel_uncore_write(gt->uncore, GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
mmio_hw_access_post(dev_priv); mmio_hw_access_post(gt);
} }
static void write_pte64(struct drm_i915_private *dev_priv, static void write_pte64(struct i915_ggtt *ggtt, unsigned long index, u64 pte)
unsigned long index, u64 pte)
{ {
void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index; void __iomem *addr = (gen8_pte_t __iomem *)ggtt->gsm + index;
writeq(pte, addr); writeq(pte, addr);
} }
...@@ -319,7 +318,7 @@ static inline int gtt_get_entry64(void *pt, ...@@ -319,7 +318,7 @@ static inline int gtt_get_entry64(void *pt,
if (WARN_ON(ret)) if (WARN_ON(ret))
return ret; return ret;
} else if (!pt) { } else if (!pt) {
e->val64 = read_pte64(vgpu->gvt->dev_priv, index); e->val64 = read_pte64(vgpu->gvt->gt->ggtt, index);
} else { } else {
e->val64 = *((u64 *)pt + index); e->val64 = *((u64 *)pt + index);
} }
...@@ -344,7 +343,7 @@ static inline int gtt_set_entry64(void *pt, ...@@ -344,7 +343,7 @@ static inline int gtt_set_entry64(void *pt,
if (WARN_ON(ret)) if (WARN_ON(ret))
return ret; return ret;
} else if (!pt) { } else if (!pt) {
write_pte64(vgpu->gvt->dev_priv, index, e->val64); write_pte64(vgpu->gvt->gt->ggtt, index, e->val64);
} else { } else {
*((u64 *)pt + index) = e->val64; *((u64 *)pt + index) = e->val64;
} }
...@@ -738,7 +737,7 @@ static int detach_oos_page(struct intel_vgpu *vgpu, ...@@ -738,7 +737,7 @@ static int detach_oos_page(struct intel_vgpu *vgpu,
static void ppgtt_free_spt(struct intel_vgpu_ppgtt_spt *spt) static void ppgtt_free_spt(struct intel_vgpu_ppgtt_spt *spt)
{ {
struct device *kdev = &spt->vgpu->gvt->dev_priv->drm.pdev->dev; struct device *kdev = &spt->vgpu->gvt->gt->i915->drm.pdev->dev;
trace_spt_free(spt->vgpu->id, spt, spt->guest_page.type); trace_spt_free(spt->vgpu->id, spt, spt->guest_page.type);
...@@ -823,7 +822,7 @@ static int reclaim_one_ppgtt_mm(struct intel_gvt *gvt); ...@@ -823,7 +822,7 @@ static int reclaim_one_ppgtt_mm(struct intel_gvt *gvt);
static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_spt( static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_spt(
struct intel_vgpu *vgpu, enum intel_gvt_gtt_type type) struct intel_vgpu *vgpu, enum intel_gvt_gtt_type type)
{ {
struct device *kdev = &vgpu->gvt->dev_priv->drm.pdev->dev; struct device *kdev = &vgpu->gvt->gt->i915->drm.pdev->dev;
struct intel_vgpu_ppgtt_spt *spt = NULL; struct intel_vgpu_ppgtt_spt *spt = NULL;
dma_addr_t daddr; dma_addr_t daddr;
int ret; int ret;
...@@ -944,7 +943,7 @@ static int ppgtt_invalidate_spt(struct intel_vgpu_ppgtt_spt *spt); ...@@ -944,7 +943,7 @@ static int ppgtt_invalidate_spt(struct intel_vgpu_ppgtt_spt *spt);
static int ppgtt_invalidate_spt_by_shadow_entry(struct intel_vgpu *vgpu, static int ppgtt_invalidate_spt_by_shadow_entry(struct intel_vgpu *vgpu,
struct intel_gvt_gtt_entry *e) struct intel_gvt_gtt_entry *e)
{ {
struct drm_i915_private *i915 = vgpu->gvt->dev_priv; struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
struct intel_vgpu_ppgtt_spt *s; struct intel_vgpu_ppgtt_spt *s;
enum intel_gvt_gtt_type cur_pt_type; enum intel_gvt_gtt_type cur_pt_type;
...@@ -1051,7 +1050,7 @@ static int ppgtt_invalidate_spt(struct intel_vgpu_ppgtt_spt *spt) ...@@ -1051,7 +1050,7 @@ static int ppgtt_invalidate_spt(struct intel_vgpu_ppgtt_spt *spt)
static bool vgpu_ips_enabled(struct intel_vgpu *vgpu) static bool vgpu_ips_enabled(struct intel_vgpu *vgpu)
{ {
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
if (INTEL_GEN(dev_priv) == 9 || INTEL_GEN(dev_priv) == 10) { if (INTEL_GEN(dev_priv) == 9 || INTEL_GEN(dev_priv) == 10) {
u32 ips = vgpu_vreg_t(vgpu, GEN8_GAMW_ECO_DEV_RW_IA) & u32 ips = vgpu_vreg_t(vgpu, GEN8_GAMW_ECO_DEV_RW_IA) &
...@@ -1160,7 +1159,7 @@ static int is_2MB_gtt_possible(struct intel_vgpu *vgpu, ...@@ -1160,7 +1159,7 @@ static int is_2MB_gtt_possible(struct intel_vgpu *vgpu,
struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
unsigned long pfn; unsigned long pfn;
if (!HAS_PAGE_SIZES(vgpu->gvt->dev_priv, I915_GTT_PAGE_SIZE_2M)) if (!HAS_PAGE_SIZES(vgpu->gvt->gt->i915, I915_GTT_PAGE_SIZE_2M))
return 0; return 0;
pfn = intel_gvt_hypervisor_gfn_to_mfn(vgpu, ops->get_pfn(entry)); pfn = intel_gvt_hypervisor_gfn_to_mfn(vgpu, ops->get_pfn(entry));
...@@ -2321,7 +2320,7 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off, ...@@ -2321,7 +2320,7 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
ggtt_invalidate_pte(vgpu, &e); ggtt_invalidate_pte(vgpu, &e);
ggtt_set_host_entry(ggtt_mm, &m, g_gtt_index); ggtt_set_host_entry(ggtt_mm, &m, g_gtt_index);
ggtt_invalidate(gvt->dev_priv); ggtt_invalidate(gvt->gt);
return 0; return 0;
} }
...@@ -2354,14 +2353,14 @@ int intel_vgpu_emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, ...@@ -2354,14 +2353,14 @@ int intel_vgpu_emulate_ggtt_mmio_write(struct intel_vgpu *vgpu,
static int alloc_scratch_pages(struct intel_vgpu *vgpu, static int alloc_scratch_pages(struct intel_vgpu *vgpu,
enum intel_gvt_gtt_type type) enum intel_gvt_gtt_type type)
{ {
struct drm_i915_private *i915 = vgpu->gvt->dev_priv; struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
struct intel_vgpu_gtt *gtt = &vgpu->gtt; struct intel_vgpu_gtt *gtt = &vgpu->gtt;
struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
int page_entry_num = I915_GTT_PAGE_SIZE >> int page_entry_num = I915_GTT_PAGE_SIZE >>
vgpu->gvt->device_info.gtt_entry_size_shift; vgpu->gvt->device_info.gtt_entry_size_shift;
void *scratch_pt; void *scratch_pt;
int i; int i;
struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev; struct device *dev = &vgpu->gvt->gt->i915->drm.pdev->dev;
dma_addr_t daddr; dma_addr_t daddr;
if (drm_WARN_ON(&i915->drm, if (drm_WARN_ON(&i915->drm,
...@@ -2419,7 +2418,7 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu, ...@@ -2419,7 +2418,7 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu,
static int release_scratch_page_tree(struct intel_vgpu *vgpu) static int release_scratch_page_tree(struct intel_vgpu *vgpu)
{ {
int i; int i;
struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev; struct device *dev = &vgpu->gvt->gt->i915->drm.pdev->dev;
dma_addr_t daddr; dma_addr_t daddr;
for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) { for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) {
...@@ -2691,7 +2690,7 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt) ...@@ -2691,7 +2690,7 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
{ {
int ret; int ret;
void *page; void *page;
struct device *dev = &gvt->dev_priv->drm.pdev->dev; struct device *dev = &gvt->gt->i915->drm.pdev->dev;
dma_addr_t daddr; dma_addr_t daddr;
gvt_dbg_core("init gtt\n"); gvt_dbg_core("init gtt\n");
...@@ -2740,7 +2739,7 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt) ...@@ -2740,7 +2739,7 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
*/ */
void intel_gvt_clean_gtt(struct intel_gvt *gvt) void intel_gvt_clean_gtt(struct intel_gvt *gvt)
{ {
struct device *dev = &gvt->dev_priv->drm.pdev->dev; struct device *dev = &gvt->gt->i915->drm.pdev->dev;
dma_addr_t daddr = (dma_addr_t)(gvt->gtt.scratch_mfn << dma_addr_t daddr = (dma_addr_t)(gvt->gtt.scratch_mfn <<
I915_GTT_PAGE_SHIFT); I915_GTT_PAGE_SHIFT);
...@@ -2788,7 +2787,6 @@ void intel_vgpu_invalidate_ppgtt(struct intel_vgpu *vgpu) ...@@ -2788,7 +2787,6 @@ void intel_vgpu_invalidate_ppgtt(struct intel_vgpu *vgpu)
void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu, bool invalidate_old) void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu, bool invalidate_old)
{ {
struct intel_gvt *gvt = vgpu->gvt; struct intel_gvt *gvt = vgpu->gvt;
struct drm_i915_private *dev_priv = gvt->dev_priv;
struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops; struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops;
struct intel_gvt_gtt_entry entry = {.type = GTT_TYPE_GGTT_PTE}; struct intel_gvt_gtt_entry entry = {.type = GTT_TYPE_GGTT_PTE};
struct intel_gvt_gtt_entry old_entry; struct intel_gvt_gtt_entry old_entry;
...@@ -2818,7 +2816,7 @@ void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu, bool invalidate_old) ...@@ -2818,7 +2816,7 @@ void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu, bool invalidate_old)
ggtt_set_host_entry(vgpu->gtt.ggtt_mm, &entry, index++); ggtt_set_host_entry(vgpu->gtt.ggtt_mm, &entry, index++);
} }
ggtt_invalidate(dev_priv); ggtt_invalidate(gvt->gt);
} }
/** /**
......
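In gtt.c the PTE accessors are narrowed to take the i915_ggtt they actually touch (read_pte64()/write_pte64() index into ggtt->gsm), while ggtt_invalidate() and the hw-access bracketing take the intel_gt. The accessors are just 64-bit loads and stores at an index into the GGTT mapping; a userspace-style sketch over an in-memory table (mock_ggtt and GTT_ENTRIES are invented, and a plain array stands in for the __iomem mapping):

#include <stdint.h>
#include <stdio.h>

#define GTT_ENTRIES 8

/* Stand-in for struct i915_ggtt: gsm points at the GGTT PTE array. */
struct mock_ggtt {
	uint64_t *gsm;
};

/* Mirrors read_pte64(ggtt, index): a single 64-bit load at the indexed slot. */
static uint64_t read_pte64(struct mock_ggtt *ggtt, unsigned long index)
{
	return ggtt->gsm[index];
}

/* Mirrors write_pte64(ggtt, index, pte): a single 64-bit store. */
static void write_pte64(struct mock_ggtt *ggtt, unsigned long index, uint64_t pte)
{
	ggtt->gsm[index] = pte;
}

int main(void)
{
	uint64_t table[GTT_ENTRIES] = { 0 };
	struct mock_ggtt ggtt = { .gsm = table };

	write_pte64(&ggtt, 3, 0xabcd000000000001ull);   /* address bits plus a present bit */
	printf("pte[3] = 0x%016llx\n", (unsigned long long)read_pte64(&ggtt, 3));
	return 0;
}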
...@@ -50,15 +50,15 @@ static const char * const supported_hypervisors[] = { ...@@ -50,15 +50,15 @@ static const char * const supported_hypervisors[] = {
static struct intel_vgpu_type *intel_gvt_find_vgpu_type(struct intel_gvt *gvt, static struct intel_vgpu_type *intel_gvt_find_vgpu_type(struct intel_gvt *gvt,
const char *name) const char *name)
{ {
const char *driver_name =
dev_driver_string(&gvt->gt->i915->drm.pdev->dev);
int i; int i;
struct intel_vgpu_type *t;
const char *driver_name = dev_driver_string(
&gvt->dev_priv->drm.pdev->dev);
name += strlen(driver_name) + 1;
for (i = 0; i < gvt->num_types; i++) { for (i = 0; i < gvt->num_types; i++) {
t = &gvt->types[i]; struct intel_vgpu_type *t = &gvt->types[i];
if (!strncmp(t->name, name + strlen(driver_name) + 1,
sizeof(t->name))) if (!strncmp(t->name, name, sizeof(t->name)))
return t; return t;
} }
...@@ -190,7 +190,7 @@ static const struct intel_gvt_ops intel_gvt_ops = { ...@@ -190,7 +190,7 @@ static const struct intel_gvt_ops intel_gvt_ops = {
static void init_device_info(struct intel_gvt *gvt) static void init_device_info(struct intel_gvt *gvt)
{ {
struct intel_gvt_device_info *info = &gvt->device_info; struct intel_gvt_device_info *info = &gvt->device_info;
struct pci_dev *pdev = gvt->dev_priv->drm.pdev; struct pci_dev *pdev = gvt->gt->i915->drm.pdev;
info->max_support_vgpus = 8; info->max_support_vgpus = 8;
info->cfg_space_size = PCI_CFG_SPACE_EXP_SIZE; info->cfg_space_size = PCI_CFG_SPACE_EXP_SIZE;
...@@ -256,17 +256,17 @@ static int init_service_thread(struct intel_gvt *gvt) ...@@ -256,17 +256,17 @@ static int init_service_thread(struct intel_gvt *gvt)
/** /**
* intel_gvt_clean_device - clean a GVT device * intel_gvt_clean_device - clean a GVT device
* @dev_priv: i915 private * @i915: i915 private
* *
* This function is called at the driver unloading stage, to free the * This function is called at the driver unloading stage, to free the
* resources owned by a GVT device. * resources owned by a GVT device.
* *
*/ */
void intel_gvt_clean_device(struct drm_i915_private *dev_priv) void intel_gvt_clean_device(struct drm_i915_private *i915)
{ {
struct intel_gvt *gvt = to_gvt(dev_priv); struct intel_gvt *gvt = fetch_and_zero(&i915->gvt);
if (drm_WARN_ON(&dev_priv->drm, !gvt)) if (drm_WARN_ON(&i915->drm, !gvt))
return; return;
intel_gvt_destroy_idle_vgpu(gvt->idle_vgpu); intel_gvt_destroy_idle_vgpu(gvt->idle_vgpu);
...@@ -284,13 +284,12 @@ void intel_gvt_clean_device(struct drm_i915_private *dev_priv) ...@@ -284,13 +284,12 @@ void intel_gvt_clean_device(struct drm_i915_private *dev_priv)
intel_gvt_clean_mmio_info(gvt); intel_gvt_clean_mmio_info(gvt);
idr_destroy(&gvt->vgpu_idr); idr_destroy(&gvt->vgpu_idr);
kfree(dev_priv->gvt); kfree(i915->gvt);
dev_priv->gvt = NULL;
} }
/** /**
* intel_gvt_init_device - initialize a GVT device * intel_gvt_init_device - initialize a GVT device
* @dev_priv: drm i915 private data * @i915: drm i915 private data
* *
* This function is called at the initialization stage, to initialize * This function is called at the initialization stage, to initialize
* necessary GVT components. * necessary GVT components.
...@@ -299,13 +298,13 @@ void intel_gvt_clean_device(struct drm_i915_private *dev_priv) ...@@ -299,13 +298,13 @@ void intel_gvt_clean_device(struct drm_i915_private *dev_priv)
* Zero on success, negative error code if failed. * Zero on success, negative error code if failed.
* *
*/ */
int intel_gvt_init_device(struct drm_i915_private *dev_priv) int intel_gvt_init_device(struct drm_i915_private *i915)
{ {
struct intel_gvt *gvt; struct intel_gvt *gvt;
struct intel_vgpu *vgpu; struct intel_vgpu *vgpu;
int ret; int ret;
if (drm_WARN_ON(&dev_priv->drm, dev_priv->gvt)) if (drm_WARN_ON(&i915->drm, i915->gvt))
return -EEXIST; return -EEXIST;
gvt = kzalloc(sizeof(struct intel_gvt), GFP_KERNEL); gvt = kzalloc(sizeof(struct intel_gvt), GFP_KERNEL);
...@@ -318,7 +317,8 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv) ...@@ -318,7 +317,8 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
spin_lock_init(&gvt->scheduler.mmio_context_lock); spin_lock_init(&gvt->scheduler.mmio_context_lock);
mutex_init(&gvt->lock); mutex_init(&gvt->lock);
mutex_init(&gvt->sched_lock); mutex_init(&gvt->sched_lock);
gvt->dev_priv = dev_priv; gvt->gt = &i915->gt;
i915->gvt = gvt;
init_device_info(gvt); init_device_info(gvt);
...@@ -377,8 +377,7 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv) ...@@ -377,8 +377,7 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
intel_gvt_debugfs_init(gvt); intel_gvt_debugfs_init(gvt);
gvt_dbg_core("gvt device initialization is done\n"); gvt_dbg_core("gvt device initialization is done\n");
dev_priv->gvt = gvt; intel_gvt_host.dev = &i915->drm.pdev->dev;
intel_gvt_host.dev = &dev_priv->drm.pdev->dev;
intel_gvt_host.initialized = true; intel_gvt_host.initialized = true;
return 0; return 0;
...@@ -403,6 +402,7 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv) ...@@ -403,6 +402,7 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
out_clean_idr: out_clean_idr:
idr_destroy(&gvt->vgpu_idr); idr_destroy(&gvt->vgpu_idr);
kfree(gvt); kfree(gvt);
i915->gvt = NULL;
return ret; return ret;
} }
......
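gvt.c renames the parameter to i915, publishes i915->gvt as soon as the structure exists (and clears it again on the error path), and tears down through fetch_and_zero(), which returns the pointer and nulls the slot in one step so nothing can reach a half-freed gvt via i915->gvt. A small sketch of that idiom (the mock types and the helper are simplified, single-threaded stand-ins):

#include <stdio.h>
#include <stdlib.h>

struct mock_gvt  { int num_types; };

struct mock_i915 { struct mock_gvt *gvt; };

/* Simplified fetch_and_zero(): return the old pointer and clear the slot. */
static struct mock_gvt *fetch_and_zero_gvt(struct mock_gvt **slot)
{
	struct mock_gvt *old = *slot;

	*slot = NULL;
	return old;
}

static int init_device(struct mock_i915 *i915)
{
	struct mock_gvt *gvt = calloc(1, sizeof(*gvt));

	if (!gvt)
		return -1;
	i915->gvt = gvt;   /* published early, like the new init path */
	/* ...further init; a failure here would free gvt and reset i915->gvt... */
	return 0;
}

static void clean_device(struct mock_i915 *i915)
{
	/* The slot is cleared before anything is freed. */
	struct mock_gvt *gvt = fetch_and_zero_gvt(&i915->gvt);

	if (!gvt)
		return;
	free(gvt);
}

int main(void)
{
	struct mock_i915 i915 = { .gvt = NULL };

	if (init_device(&i915) == 0)
		clean_device(&i915);
	printf("i915.gvt after cleanup: %p\n", (void *)i915.gvt);
	return 0;
}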
...@@ -286,7 +286,7 @@ struct intel_gvt { ...@@ -286,7 +286,7 @@ struct intel_gvt {
/* scheduler scope lock, protect gvt and vgpu schedule related data */ /* scheduler scope lock, protect gvt and vgpu schedule related data */
struct mutex sched_lock; struct mutex sched_lock;
struct drm_i915_private *dev_priv; struct intel_gt *gt;
struct idr vgpu_idr; /* vGPU IDR pool */ struct idr vgpu_idr; /* vGPU IDR pool */
struct intel_gvt_device_info device_info; struct intel_gvt_device_info device_info;
...@@ -356,14 +356,15 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt); ...@@ -356,14 +356,15 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt);
#define HOST_HIGH_GM_SIZE MB_TO_BYTES(384) #define HOST_HIGH_GM_SIZE MB_TO_BYTES(384)
#define HOST_FENCE 4 #define HOST_FENCE 4
#define gvt_to_ggtt(gvt) ((gvt)->gt->ggtt)
/* Aperture/GM space definitions for GVT device */ /* Aperture/GM space definitions for GVT device */
#define gvt_aperture_sz(gvt) (gvt->dev_priv->ggtt.mappable_end) #define gvt_aperture_sz(gvt) gvt_to_ggtt(gvt)->mappable_end
#define gvt_aperture_pa_base(gvt) (gvt->dev_priv->ggtt.gmadr.start) #define gvt_aperture_pa_base(gvt) gvt_to_ggtt(gvt)->gmadr.start
#define gvt_ggtt_gm_sz(gvt) (gvt->dev_priv->ggtt.vm.total) #define gvt_ggtt_gm_sz(gvt) gvt_to_ggtt(gvt)->vm.total
#define gvt_ggtt_sz(gvt) \ #define gvt_ggtt_sz(gvt) (gvt_to_ggtt(gvt)->vm.total >> PAGE_SHIFT << 3)
((gvt->dev_priv->ggtt.vm.total >> PAGE_SHIFT) << 3) #define gvt_hidden_sz(gvt) (gvt_ggtt_gm_sz(gvt) - gvt_aperture_sz(gvt))
#define gvt_hidden_sz(gvt) (gvt_ggtt_gm_sz(gvt) - gvt_aperture_sz(gvt))
#define gvt_aperture_gmadr_base(gvt) (0) #define gvt_aperture_gmadr_base(gvt) (0)
#define gvt_aperture_gmadr_end(gvt) (gvt_aperture_gmadr_base(gvt) \ #define gvt_aperture_gmadr_end(gvt) (gvt_aperture_gmadr_base(gvt) \
...@@ -374,7 +375,7 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt); ...@@ -374,7 +375,7 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt);
#define gvt_hidden_gmadr_end(gvt) (gvt_hidden_gmadr_base(gvt) \ #define gvt_hidden_gmadr_end(gvt) (gvt_hidden_gmadr_base(gvt) \
+ gvt_hidden_sz(gvt) - 1) + gvt_hidden_sz(gvt) - 1)
#define gvt_fence_sz(gvt) ((gvt)->dev_priv->ggtt.num_fences) #define gvt_fence_sz(gvt) (gvt_to_ggtt(gvt)->num_fences)
/* Aperture/GM space definitions for vGPU */ /* Aperture/GM space definitions for vGPU */
#define vgpu_aperture_offset(vgpu) ((vgpu)->gm.low_gm_node.start) #define vgpu_aperture_offset(vgpu) ((vgpu)->gm.low_gm_node.start)
...@@ -565,14 +566,14 @@ enum { ...@@ -565,14 +566,14 @@ enum {
GVT_FAILSAFE_GUEST_ERR, GVT_FAILSAFE_GUEST_ERR,
}; };
static inline void mmio_hw_access_pre(struct drm_i915_private *dev_priv) static inline void mmio_hw_access_pre(struct intel_gt *gt)
{ {
intel_runtime_pm_get(&dev_priv->runtime_pm); intel_runtime_pm_get(gt->uncore->rpm);
} }
static inline void mmio_hw_access_post(struct drm_i915_private *dev_priv) static inline void mmio_hw_access_post(struct intel_gt *gt)
{ {
intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm); intel_runtime_pm_put_unchecked(gt->uncore->rpm);
} }
/** /**
......
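gvt.h is where the structural change lands: struct intel_gvt now stores the intel_gt instead of dev_priv, a gvt_to_ggtt() helper centralises the ggtt lookup used by the aperture/fence macros, and the mmio_hw_access_pre/post wrappers take the gt and use its uncore's runtime-pm handle. A compact sketch of the accessor-macro idea (the mock types and the sizes are invented for illustration):

#include <stdint.h>
#include <stdio.h>

struct mock_ggtt {
	uint64_t mappable_end;   /* size of the CPU-visible aperture */
	uint64_t total;          /* total GGTT address space */
	unsigned int num_fences;
};

struct mock_gt  { struct mock_ggtt *ggtt; };

struct mock_gvt { struct mock_gt *gt; };

/* One helper hides the gt->ggtt hop, like gvt_to_ggtt() in the patch. */
#define gvt_to_ggtt(gvt)	((gvt)->gt->ggtt)

#define gvt_aperture_sz(gvt)	(gvt_to_ggtt(gvt)->mappable_end)
#define gvt_ggtt_gm_sz(gvt)	(gvt_to_ggtt(gvt)->total)
#define gvt_hidden_sz(gvt)	(gvt_ggtt_gm_sz(gvt) - gvt_aperture_sz(gvt))
#define gvt_fence_sz(gvt)	(gvt_to_ggtt(gvt)->num_fences)

int main(void)
{
	struct mock_ggtt ggtt = {
		.mappable_end = 256ull << 20,   /* 256 MiB aperture, made up */
		.total = 4ull << 30,            /* 4 GiB GGTT, made up */
		.num_fences = 32,
	};
	struct mock_gt gt = { .ggtt = &ggtt };
	struct mock_gvt gvt = { .gt = &gt };

	printf("aperture %llu MiB, hidden %llu MiB, fences %u\n",
	       (unsigned long long)(gvt_aperture_sz(&gvt) >> 20),
	       (unsigned long long)(gvt_hidden_sz(&gvt) >> 20),
	       gvt_fence_sz(&gvt));
	return 0;
}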
...@@ -49,15 +49,17 @@ ...@@ -49,15 +49,17 @@
unsigned long intel_gvt_get_device_type(struct intel_gvt *gvt) unsigned long intel_gvt_get_device_type(struct intel_gvt *gvt)
{ {
if (IS_BROADWELL(gvt->dev_priv)) struct drm_i915_private *i915 = gvt->gt->i915;
if (IS_BROADWELL(i915))
return D_BDW; return D_BDW;
else if (IS_SKYLAKE(gvt->dev_priv)) else if (IS_SKYLAKE(i915))
return D_SKL; return D_SKL;
else if (IS_KABYLAKE(gvt->dev_priv)) else if (IS_KABYLAKE(i915))
return D_KBL; return D_KBL;
else if (IS_BROXTON(gvt->dev_priv)) else if (IS_BROXTON(i915))
return D_BXT; return D_BXT;
else if (IS_COFFEELAKE(gvt->dev_priv)) else if (IS_COFFEELAKE(i915))
return D_CFL; return D_CFL;
return 0; return 0;
...@@ -142,25 +144,25 @@ static int new_mmio_info(struct intel_gvt *gvt, ...@@ -142,25 +144,25 @@ static int new_mmio_info(struct intel_gvt *gvt,
} }
/** /**
* intel_gvt_render_mmio_to_ring_id - convert a mmio offset into ring id * intel_gvt_render_mmio_to_engine - convert a mmio offset into the engine
* @gvt: a GVT device * @gvt: a GVT device
* @offset: register offset * @offset: register offset
* *
* Returns: * Returns:
* Ring ID on success, negative error code if failed. * The engine containing the offset within its mmio page.
*/ */
int intel_gvt_render_mmio_to_ring_id(struct intel_gvt *gvt, const struct intel_engine_cs *
unsigned int offset) intel_gvt_render_mmio_to_engine(struct intel_gvt *gvt, unsigned int offset)
{ {
enum intel_engine_id id;
struct intel_engine_cs *engine; struct intel_engine_cs *engine;
enum intel_engine_id id;
offset &= ~GENMASK(11, 0); offset &= ~GENMASK(11, 0);
for_each_engine(engine, gvt->dev_priv, id) { for_each_engine(engine, gvt->gt, id)
if (engine->mmio_base == offset) if (engine->mmio_base == offset)
return id; return engine;
}
return -ENODEV; return NULL;
} }
#define offset_to_fence_num(offset) \ #define offset_to_fence_num(offset) \
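intel_gvt_render_mmio_to_ring_id() becomes intel_gvt_render_mmio_to_engine(): the low 12 bits of the offset are masked off and the remaining page base is compared against each engine's mmio_base, returning the engine pointer or NULL instead of an id or -ENODEV. A standalone sketch of that lookup (mock_engine and the base addresses are invented):

#include <stddef.h>
#include <stdio.h>

struct mock_engine {
	unsigned int mmio_base;   /* start of the engine's 4 KiB register page */
	const char *name;
};

static const struct mock_engine engines[] = {
	{ 0x02000, "rcs0" },
	{ 0x12000, "vcs0" },
	{ 0x22000, "bcs0" },
};

/* Mirrors intel_gvt_render_mmio_to_engine(): page-align the offset, match it. */
static const struct mock_engine *mmio_to_engine(unsigned int offset)
{
	offset &= ~0xfffu;   /* ~GENMASK(11, 0): keep the 4 KiB page base */

	for (size_t i = 0; i < sizeof(engines) / sizeof(engines[0]); i++)
		if (engines[i].mmio_base == offset)
			return &engines[i];

	return NULL;   /* was -ENODEV; callers now test for a NULL engine */
}

int main(void)
{
	const struct mock_engine *e = mmio_to_engine(0x12234);

	printf("0x12234 -> %s\n", e ? e->name : "no engine");
	return 0;
}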
...@@ -217,7 +219,7 @@ static int gamw_echo_dev_rw_ia_write(struct intel_vgpu *vgpu, ...@@ -217,7 +219,7 @@ static int gamw_echo_dev_rw_ia_write(struct intel_vgpu *vgpu,
{ {
u32 ips = (*(u32 *)p_data) & GAMW_ECO_ENABLE_64K_IPS_FIELD; u32 ips = (*(u32 *)p_data) & GAMW_ECO_ENABLE_64K_IPS_FIELD;
if (INTEL_GEN(vgpu->gvt->dev_priv) <= 10) { if (INTEL_GEN(vgpu->gvt->gt->i915) <= 10) {
if (ips == GAMW_ECO_ENABLE_64K_IPS_FIELD) if (ips == GAMW_ECO_ENABLE_64K_IPS_FIELD)
gvt_dbg_core("vgpu%d: ips enabled\n", vgpu->id); gvt_dbg_core("vgpu%d: ips enabled\n", vgpu->id);
else if (!ips) else if (!ips)
...@@ -253,7 +255,7 @@ static int fence_mmio_read(struct intel_vgpu *vgpu, unsigned int off, ...@@ -253,7 +255,7 @@ static int fence_mmio_read(struct intel_vgpu *vgpu, unsigned int off,
static int fence_mmio_write(struct intel_vgpu *vgpu, unsigned int off, static int fence_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
void *p_data, unsigned int bytes) void *p_data, unsigned int bytes)
{ {
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; struct intel_gvt *gvt = vgpu->gvt;
unsigned int fence_num = offset_to_fence_num(off); unsigned int fence_num = offset_to_fence_num(off);
int ret; int ret;
...@@ -262,10 +264,10 @@ static int fence_mmio_write(struct intel_vgpu *vgpu, unsigned int off, ...@@ -262,10 +264,10 @@ static int fence_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
return ret; return ret;
write_vreg(vgpu, off, p_data, bytes); write_vreg(vgpu, off, p_data, bytes);
mmio_hw_access_pre(dev_priv); mmio_hw_access_pre(gvt->gt);
intel_vgpu_write_fence(vgpu, fence_num, intel_vgpu_write_fence(vgpu, fence_num,
vgpu_vreg64(vgpu, fence_num_to_offset(fence_num))); vgpu_vreg64(vgpu, fence_num_to_offset(fence_num)));
mmio_hw_access_post(dev_priv); mmio_hw_access_post(gvt->gt);
return 0; return 0;
} }
...@@ -283,7 +285,7 @@ static int mul_force_wake_write(struct intel_vgpu *vgpu, ...@@ -283,7 +285,7 @@ static int mul_force_wake_write(struct intel_vgpu *vgpu,
old = vgpu_vreg(vgpu, offset); old = vgpu_vreg(vgpu, offset);
new = CALC_MODE_MASK_REG(old, *(u32 *)p_data); new = CALC_MODE_MASK_REG(old, *(u32 *)p_data);
if (INTEL_GEN(vgpu->gvt->dev_priv) >= 9) { if (INTEL_GEN(vgpu->gvt->gt->i915) >= 9) {
switch (offset) { switch (offset) {
case FORCEWAKE_RENDER_GEN9_REG: case FORCEWAKE_RENDER_GEN9_REG:
ack_reg_offset = FORCEWAKE_ACK_RENDER_GEN9_REG; ack_reg_offset = FORCEWAKE_ACK_RENDER_GEN9_REG;
...@@ -345,7 +347,7 @@ static int gdrst_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, ...@@ -345,7 +347,7 @@ static int gdrst_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
gvt_dbg_mmio("vgpu%d: request GUC Reset\n", vgpu->id); gvt_dbg_mmio("vgpu%d: request GUC Reset\n", vgpu->id);
vgpu_vreg_t(vgpu, GUC_STATUS) |= GS_MIA_IN_RESET; vgpu_vreg_t(vgpu, GUC_STATUS) |= GS_MIA_IN_RESET;
} }
engine_mask &= INTEL_INFO(vgpu->gvt->dev_priv)->engine_mask; engine_mask &= INTEL_INFO(vgpu->gvt->gt->i915)->engine_mask;
} }
/* vgpu_lock already hold by emulate mmio r/w */ /* vgpu_lock already hold by emulate mmio r/w */
...@@ -492,7 +494,7 @@ static i915_reg_t force_nonpriv_white_list[] = { ...@@ -492,7 +494,7 @@ static i915_reg_t force_nonpriv_white_list[] = {
}; };
/* a simple bsearch */ /* a simple bsearch */
static inline bool in_whitelist(unsigned int reg) static inline bool in_whitelist(u32 reg)
{ {
int left = 0, right = ARRAY_SIZE(force_nonpriv_white_list); int left = 0, right = ARRAY_SIZE(force_nonpriv_white_list);
i915_reg_t *array = force_nonpriv_white_list; i915_reg_t *array = force_nonpriv_white_list;
...@@ -514,26 +516,21 @@ static int force_nonpriv_write(struct intel_vgpu *vgpu, ...@@ -514,26 +516,21 @@ static int force_nonpriv_write(struct intel_vgpu *vgpu,
unsigned int offset, void *p_data, unsigned int bytes) unsigned int offset, void *p_data, unsigned int bytes)
{ {
u32 reg_nonpriv = (*(u32 *)p_data) & REG_GENMASK(25, 2); u32 reg_nonpriv = (*(u32 *)p_data) & REG_GENMASK(25, 2);
int ring_id = intel_gvt_render_mmio_to_ring_id(vgpu->gvt, offset); const struct intel_engine_cs *engine =
u32 ring_base; intel_gvt_render_mmio_to_engine(vgpu->gvt, offset);
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
int ret = -EINVAL;
if ((bytes != 4) || ((offset & (bytes - 1)) != 0) || ring_id < 0) {
gvt_err("vgpu(%d) ring %d Invalid FORCE_NONPRIV offset %x(%dB)\n",
vgpu->id, ring_id, offset, bytes);
return ret;
}
ring_base = dev_priv->engine[ring_id]->mmio_base; if (bytes != 4 || !IS_ALIGNED(offset, bytes) || !engine) {
gvt_err("vgpu(%d) Invalid FORCE_NONPRIV offset %x(%dB)\n",
vgpu->id, offset, bytes);
return -EINVAL;
}
if (in_whitelist(reg_nonpriv) || if (!in_whitelist(reg_nonpriv) &&
reg_nonpriv == i915_mmio_reg_offset(RING_NOPID(ring_base))) { reg_nonpriv != i915_mmio_reg_offset(RING_NOPID(engine->mmio_base))) {
ret = intel_vgpu_default_mmio_write(vgpu, offset, p_data,
bytes);
} else
gvt_err("vgpu(%d) Invalid FORCE_NONPRIV write %x at offset %x\n", gvt_err("vgpu(%d) Invalid FORCE_NONPRIV write %x at offset %x\n",
vgpu->id, *(u32 *)p_data, offset); vgpu->id, reg_nonpriv, offset);
} else
intel_vgpu_default_mmio_write(vgpu, offset, p_data, bytes);
return 0; return 0;
} }
...@@ -756,7 +753,7 @@ static int south_chicken2_mmio_write(struct intel_vgpu *vgpu, ...@@ -756,7 +753,7 @@ static int south_chicken2_mmio_write(struct intel_vgpu *vgpu,
static int pri_surf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, static int pri_surf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes) void *p_data, unsigned int bytes)
{ {
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
u32 pipe = DSPSURF_TO_PIPE(offset); u32 pipe = DSPSURF_TO_PIPE(offset);
int event = SKL_FLIP_EVENT(pipe, PLANE_PRIMARY); int event = SKL_FLIP_EVENT(pipe, PLANE_PRIMARY);
...@@ -797,7 +794,7 @@ static int reg50080_mmio_write(struct intel_vgpu *vgpu, ...@@ -797,7 +794,7 @@ static int reg50080_mmio_write(struct intel_vgpu *vgpu,
unsigned int offset, void *p_data, unsigned int offset, void *p_data,
unsigned int bytes) unsigned int bytes)
{ {
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
enum pipe pipe = REG_50080_TO_PIPE(offset); enum pipe pipe = REG_50080_TO_PIPE(offset);
enum plane_id plane = REG_50080_TO_PLANE(offset); enum plane_id plane = REG_50080_TO_PLANE(offset);
int event = SKL_FLIP_EVENT(pipe, plane); int event = SKL_FLIP_EVENT(pipe, plane);
...@@ -821,7 +818,7 @@ static int reg50080_mmio_write(struct intel_vgpu *vgpu, ...@@ -821,7 +818,7 @@ static int reg50080_mmio_write(struct intel_vgpu *vgpu,
static int trigger_aux_channel_interrupt(struct intel_vgpu *vgpu, static int trigger_aux_channel_interrupt(struct intel_vgpu *vgpu,
unsigned int reg) unsigned int reg)
{ {
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
enum intel_gvt_event_type event; enum intel_gvt_event_type event;
if (reg == i915_mmio_reg_offset(DP_AUX_CH_CTL(AUX_CH_A))) if (reg == i915_mmio_reg_offset(DP_AUX_CH_CTL(AUX_CH_A)))
...@@ -924,11 +921,11 @@ static int dp_aux_ch_ctl_mmio_write(struct intel_vgpu *vgpu, ...@@ -924,11 +921,11 @@ static int dp_aux_ch_ctl_mmio_write(struct intel_vgpu *vgpu,
write_vreg(vgpu, offset, p_data, bytes); write_vreg(vgpu, offset, p_data, bytes);
data = vgpu_vreg(vgpu, offset); data = vgpu_vreg(vgpu, offset);
if ((INTEL_GEN(vgpu->gvt->dev_priv) >= 9) if ((INTEL_GEN(vgpu->gvt->gt->i915) >= 9)
&& offset != _REG_SKL_DP_AUX_CH_CTL(port_index)) { && offset != _REG_SKL_DP_AUX_CH_CTL(port_index)) {
/* SKL DPB/C/D aux ctl register changed */ /* SKL DPB/C/D aux ctl register changed */
return 0; return 0;
} else if (IS_BROADWELL(vgpu->gvt->dev_priv) && } else if (IS_BROADWELL(vgpu->gvt->gt->i915) &&
offset != _REG_HSW_DP_AUX_CH_CTL(port_index)) { offset != _REG_HSW_DP_AUX_CH_CTL(port_index)) {
/* write to the data registers */ /* write to the data registers */
return 0; return 0;
...@@ -1244,8 +1241,7 @@ static int handle_g2v_notification(struct intel_vgpu *vgpu, int notification) ...@@ -1244,8 +1241,7 @@ static int handle_g2v_notification(struct intel_vgpu *vgpu, int notification)
static int send_display_ready_uevent(struct intel_vgpu *vgpu, int ready) static int send_display_ready_uevent(struct intel_vgpu *vgpu, int ready)
{ {
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; struct kobject *kobj = &vgpu->gvt->gt->i915->drm.primary->kdev->kobj;
struct kobject *kobj = &dev_priv->drm.primary->kdev->kobj;
char *env[3] = {NULL, NULL, NULL}; char *env[3] = {NULL, NULL, NULL};
char vmid_str[20]; char vmid_str[20];
char display_ready_str[20]; char display_ready_str[20];
...@@ -1306,7 +1302,7 @@ static int pvinfo_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, ...@@ -1306,7 +1302,7 @@ static int pvinfo_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
static int pf_write(struct intel_vgpu *vgpu, static int pf_write(struct intel_vgpu *vgpu,
unsigned int offset, void *p_data, unsigned int bytes) unsigned int offset, void *p_data, unsigned int bytes)
{ {
struct drm_i915_private *i915 = vgpu->gvt->dev_priv; struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
u32 val = *(u32 *)p_data; u32 val = *(u32 *)p_data;
if ((offset == _PS_1A_CTRL || offset == _PS_2A_CTRL || if ((offset == _PS_1A_CTRL || offset == _PS_2A_CTRL ||
...@@ -1362,7 +1358,7 @@ static int fpga_dbg_mmio_write(struct intel_vgpu *vgpu, ...@@ -1362,7 +1358,7 @@ static int fpga_dbg_mmio_write(struct intel_vgpu *vgpu,
static int dma_ctrl_write(struct intel_vgpu *vgpu, unsigned int offset, static int dma_ctrl_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes) void *p_data, unsigned int bytes)
{ {
struct drm_i915_private *i915 = vgpu->gvt->dev_priv; struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
u32 mode; u32 mode;
write_vreg(vgpu, offset, p_data, bytes); write_vreg(vgpu, offset, p_data, bytes);
...@@ -1381,7 +1377,7 @@ static int dma_ctrl_write(struct intel_vgpu *vgpu, unsigned int offset, ...@@ -1381,7 +1377,7 @@ static int dma_ctrl_write(struct intel_vgpu *vgpu, unsigned int offset,
static int gen9_trtte_write(struct intel_vgpu *vgpu, unsigned int offset, static int gen9_trtte_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes) void *p_data, unsigned int bytes)
{ {
struct drm_i915_private *i915 = vgpu->gvt->dev_priv; struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
u32 trtte = *(u32 *)p_data; u32 trtte = *(u32 *)p_data;
if ((trtte & 1) && (trtte & (1 << 1)) == 0) { if ((trtte & 1) && (trtte & (1 << 1)) == 0) {
...@@ -1433,9 +1429,9 @@ static int mailbox_write(struct intel_vgpu *vgpu, unsigned int offset, ...@@ -1433,9 +1429,9 @@ static int mailbox_write(struct intel_vgpu *vgpu, unsigned int offset,
switch (cmd) { switch (cmd) {
case GEN9_PCODE_READ_MEM_LATENCY: case GEN9_PCODE_READ_MEM_LATENCY:
if (IS_SKYLAKE(vgpu->gvt->dev_priv) if (IS_SKYLAKE(vgpu->gvt->gt->i915) ||
|| IS_KABYLAKE(vgpu->gvt->dev_priv) IS_KABYLAKE(vgpu->gvt->gt->i915) ||
|| IS_COFFEELAKE(vgpu->gvt->dev_priv)) { IS_COFFEELAKE(vgpu->gvt->gt->i915)) {
/** /**
* "Read memory latency" command on gen9. * "Read memory latency" command on gen9.
* Below memory latency values are read * Below memory latency values are read
...@@ -1445,7 +1441,7 @@ static int mailbox_write(struct intel_vgpu *vgpu, unsigned int offset, ...@@ -1445,7 +1441,7 @@ static int mailbox_write(struct intel_vgpu *vgpu, unsigned int offset,
*data0 = 0x1e1a1100; *data0 = 0x1e1a1100;
else else
*data0 = 0x61514b3d; *data0 = 0x61514b3d;
} else if (IS_BROXTON(vgpu->gvt->dev_priv)) { } else if (IS_BROXTON(vgpu->gvt->gt->i915)) {
/** /**
* "Read memory latency" command on gen9. * "Read memory latency" command on gen9.
* Below memory latency values are read * Below memory latency values are read
...@@ -1458,9 +1454,9 @@ static int mailbox_write(struct intel_vgpu *vgpu, unsigned int offset, ...@@ -1458,9 +1454,9 @@ static int mailbox_write(struct intel_vgpu *vgpu, unsigned int offset,
} }
break; break;
case SKL_PCODE_CDCLK_CONTROL: case SKL_PCODE_CDCLK_CONTROL:
if (IS_SKYLAKE(vgpu->gvt->dev_priv) if (IS_SKYLAKE(vgpu->gvt->gt->i915) ||
|| IS_KABYLAKE(vgpu->gvt->dev_priv) IS_KABYLAKE(vgpu->gvt->gt->i915) ||
|| IS_COFFEELAKE(vgpu->gvt->dev_priv)) IS_COFFEELAKE(vgpu->gvt->gt->i915))
*data0 = SKL_CDCLK_READY_FOR_CHANGE; *data0 = SKL_CDCLK_READY_FOR_CHANGE;
break; break;
case GEN6_PCODE_READ_RC6VIDS: case GEN6_PCODE_READ_RC6VIDS:
...@@ -1484,24 +1480,26 @@ static int hws_pga_write(struct intel_vgpu *vgpu, unsigned int offset, ...@@ -1484,24 +1480,26 @@ static int hws_pga_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes) void *p_data, unsigned int bytes)
{ {
u32 value = *(u32 *)p_data; u32 value = *(u32 *)p_data;
int ring_id = intel_gvt_render_mmio_to_ring_id(vgpu->gvt, offset); const struct intel_engine_cs *engine =
intel_gvt_render_mmio_to_engine(vgpu->gvt, offset);
if (!intel_gvt_ggtt_validate_range(vgpu, value, I915_GTT_PAGE_SIZE)) { if (!intel_gvt_ggtt_validate_range(vgpu, value, I915_GTT_PAGE_SIZE)) {
gvt_vgpu_err("write invalid HWSP address, reg:0x%x, value:0x%x\n", gvt_vgpu_err("write invalid HWSP address, reg:0x%x, value:0x%x\n",
offset, value); offset, value);
return -EINVAL; return -EINVAL;
} }
/* /*
 * Need to emulate every HWSP register write so the host can * Need to emulate every HWSP register write so the host can
 * update the VM's CSB status correctly. The registers listed here * update the VM's CSB status correctly. The registers listed here
 * cover BDW, SKL and other platforms with the same HWSP registers. * cover BDW, SKL and other platforms with the same HWSP registers.
*/ */
if (unlikely(ring_id < 0 || ring_id >= I915_NUM_ENGINES)) { if (unlikely(!engine)) {
gvt_vgpu_err("access unknown hardware status page register:0x%x\n", gvt_vgpu_err("access unknown hardware status page register:0x%x\n",
offset); offset);
return -EINVAL; return -EINVAL;
} }
vgpu->hws_pga[ring_id] = value; vgpu->hws_pga[engine->id] = value;
gvt_dbg_mmio("VM(%d) write: 0x%x to HWSP: 0x%x\n", gvt_dbg_mmio("VM(%d) write: 0x%x to HWSP: 0x%x\n",
vgpu->id, value, offset); vgpu->id, value, offset);
...@@ -1513,7 +1511,7 @@ static int skl_power_well_ctl_write(struct intel_vgpu *vgpu, ...@@ -1513,7 +1511,7 @@ static int skl_power_well_ctl_write(struct intel_vgpu *vgpu,
{ {
u32 v = *(u32 *)p_data; u32 v = *(u32 *)p_data;
if (IS_BROXTON(vgpu->gvt->dev_priv)) if (IS_BROXTON(vgpu->gvt->gt->i915))
v &= (1 << 31) | (1 << 29); v &= (1 << 31) | (1 << 29);
else else
v &= (1 << 31) | (1 << 29) | (1 << 9) | v &= (1 << 31) | (1 << 29) | (1 << 9) |
...@@ -1660,26 +1658,24 @@ static int mmio_read_from_hw(struct intel_vgpu *vgpu, ...@@ -1660,26 +1658,24 @@ static int mmio_read_from_hw(struct intel_vgpu *vgpu,
unsigned int offset, void *p_data, unsigned int bytes) unsigned int offset, void *p_data, unsigned int bytes)
{ {
struct intel_gvt *gvt = vgpu->gvt; struct intel_gvt *gvt = vgpu->gvt;
struct drm_i915_private *dev_priv = gvt->dev_priv; const struct intel_engine_cs *engine =
int ring_id; intel_gvt_render_mmio_to_engine(gvt, offset);
u32 ring_base;
ring_id = intel_gvt_render_mmio_to_ring_id(gvt, offset);
/** /**
 * Read the HW reg in the following cases: * Read the HW reg in the following cases:
 * a. the offset isn't a ring mmio * a. the offset isn't a ring mmio
 * b. the offset's ring is running on hw * b. the offset's ring is running on hw
 * c. the offset is the ring timestamp mmio * c. the offset is the ring timestamp mmio
*/ */
if (ring_id >= 0)
ring_base = dev_priv->engine[ring_id]->mmio_base; if (!engine ||
vgpu == gvt->scheduler.engine_owner[engine->id] ||
if (ring_id < 0 || vgpu == gvt->scheduler.engine_owner[ring_id] || offset == i915_mmio_reg_offset(RING_TIMESTAMP(engine->mmio_base)) ||
offset == i915_mmio_reg_offset(RING_TIMESTAMP(ring_base)) || offset == i915_mmio_reg_offset(RING_TIMESTAMP_UDW(engine->mmio_base))) {
offset == i915_mmio_reg_offset(RING_TIMESTAMP_UDW(ring_base))) { mmio_hw_access_pre(gvt->gt);
mmio_hw_access_pre(dev_priv); vgpu_vreg(vgpu, offset) =
vgpu_vreg(vgpu, offset) = I915_READ(_MMIO(offset)); intel_uncore_read(gvt->gt->uncore, _MMIO(offset));
mmio_hw_access_post(dev_priv); mmio_hw_access_post(gvt->gt);
} }
return intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes); return intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
...@@ -1688,23 +1684,23 @@ static int mmio_read_from_hw(struct intel_vgpu *vgpu, ...@@ -1688,23 +1684,23 @@ static int mmio_read_from_hw(struct intel_vgpu *vgpu,
static int elsp_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, static int elsp_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes) void *p_data, unsigned int bytes)
{ {
struct drm_i915_private *i915 = vgpu->gvt->dev_priv; struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
int ring_id = intel_gvt_render_mmio_to_ring_id(vgpu->gvt, offset); const struct intel_engine_cs *engine = intel_gvt_render_mmio_to_engine(vgpu->gvt, offset);
struct intel_vgpu_execlist *execlist; struct intel_vgpu_execlist *execlist;
u32 data = *(u32 *)p_data; u32 data = *(u32 *)p_data;
int ret = 0; int ret = 0;
if (drm_WARN_ON(&i915->drm, ring_id < 0 || ring_id >= I915_NUM_ENGINES)) if (drm_WARN_ON(&i915->drm, !engine))
return -EINVAL; return -EINVAL;
execlist = &vgpu->submission.execlist[ring_id]; execlist = &vgpu->submission.execlist[engine->id];
execlist->elsp_dwords.data[3 - execlist->elsp_dwords.index] = data; execlist->elsp_dwords.data[3 - execlist->elsp_dwords.index] = data;
if (execlist->elsp_dwords.index == 3) { if (execlist->elsp_dwords.index == 3) {
ret = intel_vgpu_submit_execlist(vgpu, ring_id); ret = intel_vgpu_submit_execlist(vgpu, engine);
if(ret) if(ret)
gvt_vgpu_err("fail submit workload on ring %d\n", gvt_vgpu_err("fail submit workload on ring %s\n",
ring_id); engine->name);
} }
++execlist->elsp_dwords.index; ++execlist->elsp_dwords.index;
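elsp_mmio_write() keeps its shape: a guest ELSP write arrives one dword at a time, each dword is stored at data[3 - index], and the fourth write triggers intel_vgpu_submit_execlist(), which now takes the engine pointer rather than a ring id. A toy sketch of that accumulate-then-submit behaviour (the index wrap after the fourth write is an assumption, it is not visible in this hunk):

#include <stdint.h>
#include <stdio.h>

struct elsp_dwords {
	uint32_t data[4];
	unsigned int index;
};

/* Stand-in for intel_vgpu_submit_execlist(): just dump the collected dwords. */
static void submit(const struct elsp_dwords *e)
{
	printf("submit: %08x %08x %08x %08x\n",
	       e->data[0], e->data[1], e->data[2], e->data[3]);
}

/* Mirrors the store-and-trigger logic of elsp_mmio_write(). */
static void elsp_write(struct elsp_dwords *e, uint32_t dword)
{
	e->data[3 - e->index] = dword;     /* the first write lands in data[3] */
	if (e->index == 3)
		submit(e);
	e->index = (e->index + 1) & 3;     /* assumed wrap after the fourth write */
}

int main(void)
{
	struct elsp_dwords e = { { 0 }, 0 };
	const uint32_t guest_writes[4] = { 0x00000000, 0x00000000, 0x00001234, 0x00000001 };

	for (int i = 0; i < 4; i++)
		elsp_write(&e, guest_writes[i]);
	return 0;
}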
...@@ -1716,12 +1712,13 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, ...@@ -1716,12 +1712,13 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes) void *p_data, unsigned int bytes)
{ {
u32 data = *(u32 *)p_data; u32 data = *(u32 *)p_data;
int ring_id = intel_gvt_render_mmio_to_ring_id(vgpu->gvt, offset); const struct intel_engine_cs *engine =
intel_gvt_render_mmio_to_engine(vgpu->gvt, offset);
bool enable_execlist; bool enable_execlist;
int ret; int ret;
(*(u32 *)p_data) &= ~_MASKED_BIT_ENABLE(1); (*(u32 *)p_data) &= ~_MASKED_BIT_ENABLE(1);
if (IS_COFFEELAKE(vgpu->gvt->dev_priv)) if (IS_COFFEELAKE(vgpu->gvt->gt->i915))
(*(u32 *)p_data) &= ~_MASKED_BIT_ENABLE(2); (*(u32 *)p_data) &= ~_MASKED_BIT_ENABLE(2);
write_vreg(vgpu, offset, p_data, bytes); write_vreg(vgpu, offset, p_data, bytes);
...@@ -1730,7 +1727,7 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, ...@@ -1730,7 +1727,7 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
return 0; return 0;
} }
if (IS_COFFEELAKE(vgpu->gvt->dev_priv) && if (IS_COFFEELAKE(vgpu->gvt->gt->i915) &&
data & _MASKED_BIT_ENABLE(2)) { data & _MASKED_BIT_ENABLE(2)) {
enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST); enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST);
return 0; return 0;
...@@ -1750,16 +1747,16 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, ...@@ -1750,16 +1747,16 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
|| (data & _MASKED_BIT_DISABLE(GFX_RUN_LIST_ENABLE))) { || (data & _MASKED_BIT_DISABLE(GFX_RUN_LIST_ENABLE))) {
enable_execlist = !!(data & GFX_RUN_LIST_ENABLE); enable_execlist = !!(data & GFX_RUN_LIST_ENABLE);
gvt_dbg_core("EXECLIST %s on ring %d\n", gvt_dbg_core("EXECLIST %s on ring %s\n",
(enable_execlist ? "enabling" : "disabling"), (enable_execlist ? "enabling" : "disabling"),
ring_id); engine->name);
if (!enable_execlist) if (!enable_execlist)
return 0; return 0;
ret = intel_vgpu_select_submission_ops(vgpu, ret = intel_vgpu_select_submission_ops(vgpu,
BIT(ring_id), engine->mask,
INTEL_VGPU_EXECLIST_SUBMISSION); INTEL_VGPU_EXECLIST_SUBMISSION);
if (ret) if (ret)
return ret; return ret;
...@@ -1883,7 +1880,7 @@ static int csfe_chicken1_mmio_write(struct intel_vgpu *vgpu, ...@@ -1883,7 +1880,7 @@ static int csfe_chicken1_mmio_write(struct intel_vgpu *vgpu,
static int init_generic_mmio_info(struct intel_gvt *gvt) static int init_generic_mmio_info(struct intel_gvt *gvt)
{ {
struct drm_i915_private *dev_priv = gvt->dev_priv; struct drm_i915_private *dev_priv = gvt->gt->i915;
int ret; int ret;
MMIO_RING_DFH(RING_IMR, D_ALL, F_CMD_ACCESS, NULL, MMIO_RING_DFH(RING_IMR, D_ALL, F_CMD_ACCESS, NULL,
...@@ -2700,7 +2697,7 @@ static int init_generic_mmio_info(struct intel_gvt *gvt) ...@@ -2700,7 +2697,7 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
static int init_bdw_mmio_info(struct intel_gvt *gvt) static int init_bdw_mmio_info(struct intel_gvt *gvt)
{ {
struct drm_i915_private *dev_priv = gvt->dev_priv; struct drm_i915_private *dev_priv = gvt->gt->i915;
int ret; int ret;
MMIO_DH(GEN8_GT_IMR(0), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler); MMIO_DH(GEN8_GT_IMR(0), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
...@@ -2889,7 +2886,7 @@ static int init_bdw_mmio_info(struct intel_gvt *gvt) ...@@ -2889,7 +2886,7 @@ static int init_bdw_mmio_info(struct intel_gvt *gvt)
static int init_skl_mmio_info(struct intel_gvt *gvt) static int init_skl_mmio_info(struct intel_gvt *gvt)
{ {
struct drm_i915_private *dev_priv = gvt->dev_priv; struct drm_i915_private *dev_priv = gvt->gt->i915;
int ret; int ret;
MMIO_DH(FORCEWAKE_RENDER_GEN9, D_SKL_PLUS, NULL, mul_force_wake_write); MMIO_DH(FORCEWAKE_RENDER_GEN9, D_SKL_PLUS, NULL, mul_force_wake_write);
...@@ -3138,7 +3135,7 @@ static int init_skl_mmio_info(struct intel_gvt *gvt) ...@@ -3138,7 +3135,7 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
static int init_bxt_mmio_info(struct intel_gvt *gvt) static int init_bxt_mmio_info(struct intel_gvt *gvt)
{ {
struct drm_i915_private *dev_priv = gvt->dev_priv; struct drm_i915_private *dev_priv = gvt->gt->i915;
int ret; int ret;
MMIO_F(_MMIO(0x80000), 0x3000, 0, 0, 0, D_BXT, NULL, NULL); MMIO_F(_MMIO(0x80000), 0x3000, 0, 0, 0, D_BXT, NULL, NULL);
...@@ -3374,7 +3371,7 @@ static struct gvt_mmio_block mmio_blocks[] = { ...@@ -3374,7 +3371,7 @@ static struct gvt_mmio_block mmio_blocks[] = {
int intel_gvt_setup_mmio_info(struct intel_gvt *gvt) int intel_gvt_setup_mmio_info(struct intel_gvt *gvt)
{ {
struct intel_gvt_device_info *info = &gvt->device_info; struct intel_gvt_device_info *info = &gvt->device_info;
struct drm_i915_private *dev_priv = gvt->dev_priv; struct drm_i915_private *i915 = gvt->gt->i915;
int size = info->mmio_size / 4 * sizeof(*gvt->mmio.mmio_attribute); int size = info->mmio_size / 4 * sizeof(*gvt->mmio.mmio_attribute);
int ret; int ret;
...@@ -3386,20 +3383,20 @@ int intel_gvt_setup_mmio_info(struct intel_gvt *gvt) ...@@ -3386,20 +3383,20 @@ int intel_gvt_setup_mmio_info(struct intel_gvt *gvt)
if (ret) if (ret)
goto err; goto err;
if (IS_BROADWELL(dev_priv)) { if (IS_BROADWELL(i915)) {
ret = init_bdw_mmio_info(gvt); ret = init_bdw_mmio_info(gvt);
if (ret) if (ret)
goto err; goto err;
} else if (IS_SKYLAKE(dev_priv) } else if (IS_SKYLAKE(i915) ||
|| IS_KABYLAKE(dev_priv) IS_KABYLAKE(i915) ||
|| IS_COFFEELAKE(dev_priv)) { IS_COFFEELAKE(i915)) {
ret = init_bdw_mmio_info(gvt); ret = init_bdw_mmio_info(gvt);
if (ret) if (ret)
goto err; goto err;
ret = init_skl_mmio_info(gvt); ret = init_skl_mmio_info(gvt);
if (ret) if (ret)
goto err; goto err;
} else if (IS_BROXTON(dev_priv)) { } else if (IS_BROXTON(i915)) {
ret = init_bdw_mmio_info(gvt); ret = init_bdw_mmio_info(gvt);
if (ret) if (ret)
goto err; goto err;
...@@ -3548,7 +3545,7 @@ bool intel_gvt_in_force_nonpriv_whitelist(struct intel_gvt *gvt, ...@@ -3548,7 +3545,7 @@ bool intel_gvt_in_force_nonpriv_whitelist(struct intel_gvt *gvt,
int intel_vgpu_mmio_reg_rw(struct intel_vgpu *vgpu, unsigned int offset, int intel_vgpu_mmio_reg_rw(struct intel_vgpu *vgpu, unsigned int offset,
void *pdata, unsigned int bytes, bool is_read) void *pdata, unsigned int bytes, bool is_read)
{ {
struct drm_i915_private *i915 = vgpu->gvt->dev_priv; struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
struct intel_gvt *gvt = vgpu->gvt; struct intel_gvt *gvt = vgpu->gvt;
struct intel_gvt_mmio_info *mmio_info; struct intel_gvt_mmio_info *mmio_info;
struct gvt_mmio_block *mmio_block; struct gvt_mmio_block *mmio_block;
......
...@@ -244,8 +244,8 @@ int intel_vgpu_reg_master_irq_handler(struct intel_vgpu *vgpu, ...@@ -244,8 +244,8 @@ int intel_vgpu_reg_master_irq_handler(struct intel_vgpu *vgpu,
int intel_vgpu_reg_ier_handler(struct intel_vgpu *vgpu, int intel_vgpu_reg_ier_handler(struct intel_vgpu *vgpu,
unsigned int reg, void *p_data, unsigned int bytes) unsigned int reg, void *p_data, unsigned int bytes)
{ {
struct drm_i915_private *i915 = vgpu->gvt->dev_priv;
struct intel_gvt *gvt = vgpu->gvt; struct intel_gvt *gvt = vgpu->gvt;
struct drm_i915_private *i915 = gvt->gt->i915;
struct intel_gvt_irq_ops *ops = gvt->irq.ops; struct intel_gvt_irq_ops *ops = gvt->irq.ops;
struct intel_gvt_irq_info *info; struct intel_gvt_irq_info *info;
u32 ier = *(u32 *)p_data; u32 ier = *(u32 *)p_data;
...@@ -283,7 +283,7 @@ int intel_vgpu_reg_ier_handler(struct intel_vgpu *vgpu, ...@@ -283,7 +283,7 @@ int intel_vgpu_reg_ier_handler(struct intel_vgpu *vgpu,
int intel_vgpu_reg_iir_handler(struct intel_vgpu *vgpu, unsigned int reg, int intel_vgpu_reg_iir_handler(struct intel_vgpu *vgpu, unsigned int reg,
void *p_data, unsigned int bytes) void *p_data, unsigned int bytes)
{ {
struct drm_i915_private *i915 = vgpu->gvt->dev_priv; struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
struct intel_gvt_irq_info *info = regbase_to_irq_info(vgpu->gvt, struct intel_gvt_irq_info *info = regbase_to_irq_info(vgpu->gvt,
iir_to_regbase(reg)); iir_to_regbase(reg));
u32 iir = *(u32 *)p_data; u32 iir = *(u32 *)p_data;
...@@ -321,7 +321,7 @@ static struct intel_gvt_irq_map gen8_irq_map[] = { ...@@ -321,7 +321,7 @@ static struct intel_gvt_irq_map gen8_irq_map[] = {
static void update_upstream_irq(struct intel_vgpu *vgpu, static void update_upstream_irq(struct intel_vgpu *vgpu,
struct intel_gvt_irq_info *info) struct intel_gvt_irq_info *info)
{ {
struct drm_i915_private *i915 = vgpu->gvt->dev_priv; struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
struct intel_gvt_irq *irq = &vgpu->gvt->irq; struct intel_gvt_irq *irq = &vgpu->gvt->irq;
struct intel_gvt_irq_map *map = irq->irq_map; struct intel_gvt_irq_map *map = irq->irq_map;
struct intel_gvt_irq_info *up_irq_info = NULL; struct intel_gvt_irq_info *up_irq_info = NULL;
...@@ -540,7 +540,7 @@ static void gen8_init_irq( ...@@ -540,7 +540,7 @@ static void gen8_init_irq(
SET_BIT_INFO(irq, 4, VCS_MI_FLUSH_DW, INTEL_GVT_IRQ_INFO_GT1); SET_BIT_INFO(irq, 4, VCS_MI_FLUSH_DW, INTEL_GVT_IRQ_INFO_GT1);
SET_BIT_INFO(irq, 8, VCS_AS_CONTEXT_SWITCH, INTEL_GVT_IRQ_INFO_GT1); SET_BIT_INFO(irq, 8, VCS_AS_CONTEXT_SWITCH, INTEL_GVT_IRQ_INFO_GT1);
if (HAS_ENGINE(gvt->dev_priv, VCS1)) { if (HAS_ENGINE(gvt->gt->i915, VCS1)) {
SET_BIT_INFO(irq, 16, VCS2_MI_USER_INTERRUPT, SET_BIT_INFO(irq, 16, VCS2_MI_USER_INTERRUPT,
INTEL_GVT_IRQ_INFO_GT1); INTEL_GVT_IRQ_INFO_GT1);
SET_BIT_INFO(irq, 20, VCS2_MI_FLUSH_DW, SET_BIT_INFO(irq, 20, VCS2_MI_FLUSH_DW,
...@@ -572,7 +572,7 @@ static void gen8_init_irq( ...@@ -572,7 +572,7 @@ static void gen8_init_irq(
SET_BIT_INFO(irq, 22, DP_C_HOTPLUG, INTEL_GVT_IRQ_INFO_PCH); SET_BIT_INFO(irq, 22, DP_C_HOTPLUG, INTEL_GVT_IRQ_INFO_PCH);
SET_BIT_INFO(irq, 23, DP_D_HOTPLUG, INTEL_GVT_IRQ_INFO_PCH); SET_BIT_INFO(irq, 23, DP_D_HOTPLUG, INTEL_GVT_IRQ_INFO_PCH);
if (IS_BROADWELL(gvt->dev_priv)) { if (IS_BROADWELL(gvt->gt->i915)) {
SET_BIT_INFO(irq, 25, AUX_CHANNEL_B, INTEL_GVT_IRQ_INFO_PCH); SET_BIT_INFO(irq, 25, AUX_CHANNEL_B, INTEL_GVT_IRQ_INFO_PCH);
SET_BIT_INFO(irq, 26, AUX_CHANNEL_C, INTEL_GVT_IRQ_INFO_PCH); SET_BIT_INFO(irq, 26, AUX_CHANNEL_C, INTEL_GVT_IRQ_INFO_PCH);
SET_BIT_INFO(irq, 27, AUX_CHANNEL_D, INTEL_GVT_IRQ_INFO_PCH); SET_BIT_INFO(irq, 27, AUX_CHANNEL_D, INTEL_GVT_IRQ_INFO_PCH);
...@@ -585,7 +585,7 @@ static void gen8_init_irq( ...@@ -585,7 +585,7 @@ static void gen8_init_irq(
SET_BIT_INFO(irq, 4, PRIMARY_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C); SET_BIT_INFO(irq, 4, PRIMARY_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C);
SET_BIT_INFO(irq, 5, SPRITE_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C); SET_BIT_INFO(irq, 5, SPRITE_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C);
} else if (INTEL_GEN(gvt->dev_priv) >= 9) { } else if (INTEL_GEN(gvt->gt->i915) >= 9) {
SET_BIT_INFO(irq, 25, AUX_CHANNEL_B, INTEL_GVT_IRQ_INFO_DE_PORT); SET_BIT_INFO(irq, 25, AUX_CHANNEL_B, INTEL_GVT_IRQ_INFO_DE_PORT);
SET_BIT_INFO(irq, 26, AUX_CHANNEL_C, INTEL_GVT_IRQ_INFO_DE_PORT); SET_BIT_INFO(irq, 26, AUX_CHANNEL_C, INTEL_GVT_IRQ_INFO_DE_PORT);
SET_BIT_INFO(irq, 27, AUX_CHANNEL_D, INTEL_GVT_IRQ_INFO_DE_PORT); SET_BIT_INFO(irq, 27, AUX_CHANNEL_D, INTEL_GVT_IRQ_INFO_DE_PORT);
...@@ -622,7 +622,7 @@ static struct intel_gvt_irq_ops gen8_irq_ops = { ...@@ -622,7 +622,7 @@ static struct intel_gvt_irq_ops gen8_irq_ops = {
void intel_vgpu_trigger_virtual_event(struct intel_vgpu *vgpu, void intel_vgpu_trigger_virtual_event(struct intel_vgpu *vgpu,
enum intel_gvt_event_type event) enum intel_gvt_event_type event)
{ {
struct drm_i915_private *i915 = vgpu->gvt->dev_priv; struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
struct intel_gvt *gvt = vgpu->gvt; struct intel_gvt *gvt = vgpu->gvt;
struct intel_gvt_irq *irq = &gvt->irq; struct intel_gvt_irq *irq = &gvt->irq;
gvt_event_virt_handler_t handler; gvt_event_virt_handler_t handler;
......
...@@ -150,7 +150,7 @@ static bool kvmgt_guest_exit(struct kvmgt_guest_info *info); ...@@ -150,7 +150,7 @@ static bool kvmgt_guest_exit(struct kvmgt_guest_info *info);
static void gvt_unpin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn, static void gvt_unpin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
unsigned long size) unsigned long size)
{ {
struct drm_i915_private *i915 = vgpu->gvt->dev_priv; struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
int total_pages; int total_pages;
int npage; int npage;
int ret; int ret;
...@@ -218,7 +218,7 @@ static int gvt_pin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn, ...@@ -218,7 +218,7 @@ static int gvt_pin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
static int gvt_dma_map_page(struct intel_vgpu *vgpu, unsigned long gfn, static int gvt_dma_map_page(struct intel_vgpu *vgpu, unsigned long gfn,
dma_addr_t *dma_addr, unsigned long size) dma_addr_t *dma_addr, unsigned long size)
{ {
struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev; struct device *dev = &vgpu->gvt->gt->i915->drm.pdev->dev;
struct page *page = NULL; struct page *page = NULL;
int ret; int ret;
...@@ -241,7 +241,7 @@ static int gvt_dma_map_page(struct intel_vgpu *vgpu, unsigned long gfn, ...@@ -241,7 +241,7 @@ static int gvt_dma_map_page(struct intel_vgpu *vgpu, unsigned long gfn,
static void gvt_dma_unmap_page(struct intel_vgpu *vgpu, unsigned long gfn, static void gvt_dma_unmap_page(struct intel_vgpu *vgpu, unsigned long gfn,
dma_addr_t dma_addr, unsigned long size) dma_addr_t dma_addr, unsigned long size)
{ {
struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev; struct device *dev = &vgpu->gvt->gt->i915->drm.pdev->dev;
dma_unmap_page(dev, dma_addr, size, PCI_DMA_BIDIRECTIONAL); dma_unmap_page(dev, dma_addr, size, PCI_DMA_BIDIRECTIONAL);
gvt_unpin_guest_page(vgpu, gfn, size); gvt_unpin_guest_page(vgpu, gfn, size);
...@@ -855,7 +855,7 @@ static void intel_vgpu_release_msi_eventfd_ctx(struct intel_vgpu *vgpu) ...@@ -855,7 +855,7 @@ static void intel_vgpu_release_msi_eventfd_ctx(struct intel_vgpu *vgpu)
static void __intel_vgpu_release(struct intel_vgpu *vgpu) static void __intel_vgpu_release(struct intel_vgpu *vgpu)
{ {
struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu); struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
struct drm_i915_private *i915 = vgpu->gvt->dev_priv; struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
struct kvmgt_guest_info *info; struct kvmgt_guest_info *info;
int ret; int ret;
...@@ -963,7 +963,7 @@ static int intel_vgpu_aperture_rw(struct intel_vgpu *vgpu, u64 off, ...@@ -963,7 +963,7 @@ static int intel_vgpu_aperture_rw(struct intel_vgpu *vgpu, u64 off,
return -EINVAL; return -EINVAL;
} }
aperture_va = io_mapping_map_wc(&vgpu->gvt->dev_priv->ggtt.iomap, aperture_va = io_mapping_map_wc(&vgpu->gvt->gt->ggtt->iomap,
ALIGN_DOWN(off, PAGE_SIZE), ALIGN_DOWN(off, PAGE_SIZE),
count + offset_in_page(off)); count + offset_in_page(off));
if (!aperture_va) if (!aperture_va)
......
...@@ -102,8 +102,8 @@ static void failsafe_emulate_mmio_rw(struct intel_vgpu *vgpu, u64 pa, ...@@ -102,8 +102,8 @@ static void failsafe_emulate_mmio_rw(struct intel_vgpu *vgpu, u64 pa,
int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, u64 pa, int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, u64 pa,
void *p_data, unsigned int bytes) void *p_data, unsigned int bytes)
{ {
struct drm_i915_private *i915 = vgpu->gvt->dev_priv;
struct intel_gvt *gvt = vgpu->gvt; struct intel_gvt *gvt = vgpu->gvt;
struct drm_i915_private *i915 = gvt->gt->i915;
unsigned int offset = 0; unsigned int offset = 0;
int ret = -EINVAL; int ret = -EINVAL;
...@@ -177,8 +177,8 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, u64 pa, ...@@ -177,8 +177,8 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, u64 pa,
int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, u64 pa, int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, u64 pa,
void *p_data, unsigned int bytes) void *p_data, unsigned int bytes)
{ {
struct drm_i915_private *i915 = vgpu->gvt->dev_priv;
struct intel_gvt *gvt = vgpu->gvt; struct intel_gvt *gvt = vgpu->gvt;
struct drm_i915_private *i915 = gvt->gt->i915;
unsigned int offset = 0; unsigned int offset = 0;
int ret = -EINVAL; int ret = -EINVAL;
...@@ -251,7 +251,7 @@ void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu, bool dmlr) ...@@ -251,7 +251,7 @@ void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu, bool dmlr)
/* set bits 0:2 (Core C-State) to C0 */ /* set bits 0:2 (Core C-State) to C0 */
vgpu_vreg_t(vgpu, GEN6_GT_CORE_STATUS) = 0; vgpu_vreg_t(vgpu, GEN6_GT_CORE_STATUS) = 0;
if (IS_BROXTON(vgpu->gvt->dev_priv)) { if (IS_BROXTON(vgpu->gvt->gt->i915)) {
vgpu_vreg_t(vgpu, BXT_P_CR_GT_DISP_PWRON) &= vgpu_vreg_t(vgpu, BXT_P_CR_GT_DISP_PWRON) &=
~(BIT(0) | BIT(1)); ~(BIT(0) | BIT(1));
vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY0)) &= vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY0)) &=
......
...@@ -69,8 +69,8 @@ struct intel_gvt_mmio_info { ...@@ -69,8 +69,8 @@ struct intel_gvt_mmio_info {
struct hlist_node node; struct hlist_node node;
}; };
int intel_gvt_render_mmio_to_ring_id(struct intel_gvt *gvt, const struct intel_engine_cs *
unsigned int reg); intel_gvt_render_mmio_to_engine(struct intel_gvt *gvt, unsigned int reg);
unsigned long intel_gvt_get_device_type(struct intel_gvt *gvt); unsigned long intel_gvt_get_device_type(struct intel_gvt *gvt);
bool intel_gvt_match_device(struct intel_gvt *gvt, unsigned long device); bool intel_gvt_match_device(struct intel_gvt *gvt, unsigned long device);
......
...@@ -157,12 +157,13 @@ static u32 gen9_mocs_mmio_offset_list[] = { ...@@ -157,12 +157,13 @@ static u32 gen9_mocs_mmio_offset_list[] = {
[VECS0] = 0xcb00, [VECS0] = 0xcb00,
}; };
static void load_render_mocs(struct drm_i915_private *dev_priv) static void load_render_mocs(const struct intel_engine_cs *engine)
{ {
struct intel_gvt *gvt = dev_priv->gvt; struct intel_gvt *gvt = engine->i915->gvt;
i915_reg_t offset; struct intel_uncore *uncore = engine->uncore;
u32 cnt = gvt->engine_mmio_list.mocs_mmio_offset_list_cnt; u32 cnt = gvt->engine_mmio_list.mocs_mmio_offset_list_cnt;
u32 *regs = gvt->engine_mmio_list.mocs_mmio_offset_list; u32 *regs = gvt->engine_mmio_list.mocs_mmio_offset_list;
i915_reg_t offset;
int ring_id, i; int ring_id, i;
/* Platform doesn't have mocs mmios. */ /* Platform doesn't have mocs mmios. */
...@@ -170,12 +171,13 @@ static void load_render_mocs(struct drm_i915_private *dev_priv) ...@@ -170,12 +171,13 @@ static void load_render_mocs(struct drm_i915_private *dev_priv)
return; return;
for (ring_id = 0; ring_id < cnt; ring_id++) { for (ring_id = 0; ring_id < cnt; ring_id++) {
if (!HAS_ENGINE(dev_priv, ring_id)) if (!HAS_ENGINE(engine->i915, ring_id))
continue; continue;
offset.reg = regs[ring_id]; offset.reg = regs[ring_id];
for (i = 0; i < GEN9_MOCS_SIZE; i++) { for (i = 0; i < GEN9_MOCS_SIZE; i++) {
gen9_render_mocs.control_table[ring_id][i] = gen9_render_mocs.control_table[ring_id][i] =
I915_READ_FW(offset); intel_uncore_read_fw(uncore, offset);
offset.reg += 4; offset.reg += 4;
} }
} }
...@@ -183,7 +185,7 @@ static void load_render_mocs(struct drm_i915_private *dev_priv) ...@@ -183,7 +185,7 @@ static void load_render_mocs(struct drm_i915_private *dev_priv)
offset.reg = 0xb020; offset.reg = 0xb020;
for (i = 0; i < GEN9_MOCS_SIZE / 2; i++) { for (i = 0; i < GEN9_MOCS_SIZE / 2; i++) {
gen9_render_mocs.l3cc_table[i] = gen9_render_mocs.l3cc_table[i] =
I915_READ_FW(offset); intel_uncore_read_fw(uncore, offset);
offset.reg += 4; offset.reg += 4;
} }
gen9_render_mocs.initialized = true; gen9_render_mocs.initialized = true;
...@@ -214,13 +216,11 @@ restore_context_mmio_for_inhibit(struct intel_vgpu *vgpu, ...@@ -214,13 +216,11 @@ restore_context_mmio_for_inhibit(struct intel_vgpu *vgpu,
*cs++ = MI_LOAD_REGISTER_IMM(count); *cs++ = MI_LOAD_REGISTER_IMM(count);
for (mmio = gvt->engine_mmio_list.mmio; for (mmio = gvt->engine_mmio_list.mmio;
i915_mmio_reg_valid(mmio->reg); mmio++) { i915_mmio_reg_valid(mmio->reg); mmio++) {
if (mmio->ring_id != ring_id || if (mmio->id != ring_id || !mmio->in_context)
!mmio->in_context)
continue; continue;
*cs++ = i915_mmio_reg_offset(mmio->reg); *cs++ = i915_mmio_reg_offset(mmio->reg);
*cs++ = vgpu_vreg_t(vgpu, mmio->reg) | *cs++ = vgpu_vreg_t(vgpu, mmio->reg) | (mmio->mask << 16);
(mmio->mask << 16);
gvt_dbg_core("add lri reg pair 0x%x:0x%x in inhibit ctx, vgpu:%d, rind_id:%d\n", gvt_dbg_core("add lri reg pair 0x%x:0x%x in inhibit ctx, vgpu:%d, rind_id:%d\n",
*(cs-2), *(cs-1), vgpu->id, ring_id); *(cs-2), *(cs-1), vgpu->id, ring_id);
} }
...@@ -344,10 +344,10 @@ static u32 gen8_tlb_mmio_offset_list[] = { ...@@ -344,10 +344,10 @@ static u32 gen8_tlb_mmio_offset_list[] = {
[VECS0] = 0x4270, [VECS0] = 0x4270,
}; };
static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id) static void handle_tlb_pending_event(struct intel_vgpu *vgpu,
const struct intel_engine_cs *engine)
{ {
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; struct intel_uncore *uncore = engine->uncore;
struct intel_uncore *uncore = &dev_priv->uncore;
struct intel_vgpu_submission *s = &vgpu->submission; struct intel_vgpu_submission *s = &vgpu->submission;
u32 *regs = vgpu->gvt->engine_mmio_list.tlb_mmio_offset_list; u32 *regs = vgpu->gvt->engine_mmio_list.tlb_mmio_offset_list;
u32 cnt = vgpu->gvt->engine_mmio_list.tlb_mmio_offset_list_cnt; u32 cnt = vgpu->gvt->engine_mmio_list.tlb_mmio_offset_list_cnt;
...@@ -357,13 +357,13 @@ static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id) ...@@ -357,13 +357,13 @@ static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
if (!regs) if (!regs)
return; return;
if (drm_WARN_ON(&dev_priv->drm, ring_id >= cnt)) if (drm_WARN_ON(&engine->i915->drm, engine->id >= cnt))
return; return;
if (!test_and_clear_bit(ring_id, (void *)s->tlb_handle_pending)) if (!test_and_clear_bit(engine->id, (void *)s->tlb_handle_pending))
return; return;
reg = _MMIO(regs[ring_id]); reg = _MMIO(regs[engine->id]);
/* WaForceWakeRenderDuringMmioTLBInvalidate:skl /* WaForceWakeRenderDuringMmioTLBInvalidate:skl
* we need to put a forcewake when invalidating RCS TLB caches, * we need to put a forcewake when invalidating RCS TLB caches,
...@@ -372,30 +372,27 @@ static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id) ...@@ -372,30 +372,27 @@ static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
*/ */
fw = intel_uncore_forcewake_for_reg(uncore, reg, fw = intel_uncore_forcewake_for_reg(uncore, reg,
FW_REG_READ | FW_REG_WRITE); FW_REG_READ | FW_REG_WRITE);
if (ring_id == RCS0 && INTEL_GEN(dev_priv) >= 9) if (engine->id == RCS0 && INTEL_GEN(engine->i915) >= 9)
fw |= FORCEWAKE_RENDER; fw |= FORCEWAKE_RENDER;
intel_uncore_forcewake_get(uncore, fw); intel_uncore_forcewake_get(uncore, fw);
intel_uncore_write_fw(uncore, reg, 0x1); intel_uncore_write_fw(uncore, reg, 0x1);
if (wait_for_atomic((intel_uncore_read_fw(uncore, reg) == 0), 50)) if (wait_for_atomic(intel_uncore_read_fw(uncore, reg) == 0, 50))
gvt_vgpu_err("timeout in invalidate ring (%d) tlb\n", ring_id); gvt_vgpu_err("timeout in invalidate ring %s tlb\n",
engine->name);
else else
vgpu_vreg_t(vgpu, reg) = 0; vgpu_vreg_t(vgpu, reg) = 0;
intel_uncore_forcewake_put(uncore, fw); intel_uncore_forcewake_put(uncore, fw);
gvt_dbg_core("invalidate TLB for ring %d\n", ring_id); gvt_dbg_core("invalidate TLB for ring %s\n", engine->name);
} }
static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next, static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next,
int ring_id) const struct intel_engine_cs *engine)
{ {
struct drm_i915_private *dev_priv;
i915_reg_t offset, l3_offset;
u32 old_v, new_v;
u32 regs[] = { u32 regs[] = {
[RCS0] = 0xc800, [RCS0] = 0xc800,
[VCS0] = 0xc900, [VCS0] = 0xc900,
...@@ -403,36 +400,38 @@ static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next, ...@@ -403,36 +400,38 @@ static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next,
[BCS0] = 0xcc00, [BCS0] = 0xcc00,
[VECS0] = 0xcb00, [VECS0] = 0xcb00,
}; };
struct intel_uncore *uncore = engine->uncore;
i915_reg_t offset, l3_offset;
u32 old_v, new_v;
int i; int i;
dev_priv = pre ? pre->gvt->dev_priv : next->gvt->dev_priv; if (drm_WARN_ON(&engine->i915->drm, engine->id >= ARRAY_SIZE(regs)))
if (drm_WARN_ON(&dev_priv->drm, ring_id >= ARRAY_SIZE(regs)))
return; return;
if (ring_id == RCS0 && IS_GEN(dev_priv, 9)) if (engine->id == RCS0 && IS_GEN(engine->i915, 9))
return; return;
if (!pre && !gen9_render_mocs.initialized) if (!pre && !gen9_render_mocs.initialized)
load_render_mocs(dev_priv); load_render_mocs(engine);
offset.reg = regs[ring_id]; offset.reg = regs[engine->id];
for (i = 0; i < GEN9_MOCS_SIZE; i++) { for (i = 0; i < GEN9_MOCS_SIZE; i++) {
if (pre) if (pre)
old_v = vgpu_vreg_t(pre, offset); old_v = vgpu_vreg_t(pre, offset);
else else
old_v = gen9_render_mocs.control_table[ring_id][i]; old_v = gen9_render_mocs.control_table[engine->id][i];
if (next) if (next)
new_v = vgpu_vreg_t(next, offset); new_v = vgpu_vreg_t(next, offset);
else else
new_v = gen9_render_mocs.control_table[ring_id][i]; new_v = gen9_render_mocs.control_table[engine->id][i];
if (old_v != new_v) if (old_v != new_v)
I915_WRITE_FW(offset, new_v); intel_uncore_write_fw(uncore, offset, new_v);
offset.reg += 4; offset.reg += 4;
} }
if (ring_id == RCS0) { if (engine->id == RCS0) {
l3_offset.reg = 0xb020; l3_offset.reg = 0xb020;
for (i = 0; i < GEN9_MOCS_SIZE / 2; i++) { for (i = 0; i < GEN9_MOCS_SIZE / 2; i++) {
if (pre) if (pre)
...@@ -445,7 +444,7 @@ static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next, ...@@ -445,7 +444,7 @@ static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next,
new_v = gen9_render_mocs.l3cc_table[i]; new_v = gen9_render_mocs.l3cc_table[i];
if (old_v != new_v) if (old_v != new_v)
I915_WRITE_FW(l3_offset, new_v); intel_uncore_write_fw(uncore, l3_offset, new_v);
l3_offset.reg += 4; l3_offset.reg += 4;
} }
...@@ -467,38 +466,40 @@ bool is_inhibit_context(struct intel_context *ce) ...@@ -467,38 +466,40 @@ bool is_inhibit_context(struct intel_context *ce)
/* Switch ring mmio values (context). */ /* Switch ring mmio values (context). */
static void switch_mmio(struct intel_vgpu *pre, static void switch_mmio(struct intel_vgpu *pre,
struct intel_vgpu *next, struct intel_vgpu *next,
int ring_id) const struct intel_engine_cs *engine)
{ {
struct drm_i915_private *dev_priv; struct intel_uncore *uncore = engine->uncore;
struct intel_vgpu_submission *s; struct intel_vgpu_submission *s;
struct engine_mmio *mmio; struct engine_mmio *mmio;
u32 old_v, new_v; u32 old_v, new_v;
dev_priv = pre ? pre->gvt->dev_priv : next->gvt->dev_priv; if (INTEL_GEN(engine->i915) >= 9)
if (INTEL_GEN(dev_priv) >= 9) switch_mocs(pre, next, engine);
switch_mocs(pre, next, ring_id);
for (mmio = dev_priv->gvt->engine_mmio_list.mmio; for (mmio = engine->i915->gvt->engine_mmio_list.mmio;
i915_mmio_reg_valid(mmio->reg); mmio++) { i915_mmio_reg_valid(mmio->reg); mmio++) {
if (mmio->ring_id != ring_id) if (mmio->id != engine->id)
continue; continue;
/* /*
* No need to save or restore the mmio which is in the context * No need to save or restore the mmio which is in the context
* state image on gen9; it's initialized by the lri command and * state image on gen9; it's initialized by the lri command and
* saved or restored together with the context. * saved or restored together with the context.
*/ */
if (IS_GEN(dev_priv, 9) && mmio->in_context) if (IS_GEN(engine->i915, 9) && mmio->in_context)
continue; continue;
// save // save
if (pre) { if (pre) {
vgpu_vreg_t(pre, mmio->reg) = I915_READ_FW(mmio->reg); vgpu_vreg_t(pre, mmio->reg) =
intel_uncore_read_fw(uncore, mmio->reg);
if (mmio->mask) if (mmio->mask)
vgpu_vreg_t(pre, mmio->reg) &= vgpu_vreg_t(pre, mmio->reg) &=
~(mmio->mask << 16); ~(mmio->mask << 16);
old_v = vgpu_vreg_t(pre, mmio->reg); old_v = vgpu_vreg_t(pre, mmio->reg);
} else } else {
old_v = mmio->value = I915_READ_FW(mmio->reg); old_v = mmio->value =
intel_uncore_read_fw(uncore, mmio->reg);
}
// restore // restore
if (next) { if (next) {
...@@ -509,12 +510,12 @@ static void switch_mmio(struct intel_vgpu *pre, ...@@ -509,12 +510,12 @@ static void switch_mmio(struct intel_vgpu *pre,
* itself. * itself.
*/ */
if (mmio->in_context && if (mmio->in_context &&
!is_inhibit_context(s->shadow[ring_id])) !is_inhibit_context(s->shadow[engine->id]))
continue; continue;
if (mmio->mask) if (mmio->mask)
new_v = vgpu_vreg_t(next, mmio->reg) | new_v = vgpu_vreg_t(next, mmio->reg) |
(mmio->mask << 16); (mmio->mask << 16);
else else
new_v = vgpu_vreg_t(next, mmio->reg); new_v = vgpu_vreg_t(next, mmio->reg);
} else { } else {
...@@ -526,7 +527,7 @@ static void switch_mmio(struct intel_vgpu *pre, ...@@ -526,7 +527,7 @@ static void switch_mmio(struct intel_vgpu *pre,
new_v = mmio->value; new_v = mmio->value;
} }
I915_WRITE_FW(mmio->reg, new_v); intel_uncore_write_fw(uncore, mmio->reg, new_v);
trace_render_mmio(pre ? pre->id : 0, trace_render_mmio(pre ? pre->id : 0,
next ? next->id : 0, next ? next->id : 0,
...@@ -536,39 +537,37 @@ static void switch_mmio(struct intel_vgpu *pre, ...@@ -536,39 +537,37 @@ static void switch_mmio(struct intel_vgpu *pre,
} }
if (next) if (next)
handle_tlb_pending_event(next, ring_id); handle_tlb_pending_event(next, engine);
} }
/** /**
* intel_gvt_switch_mmio - switch mmio context of specific engine * intel_gvt_switch_mmio - switch mmio context of specific engine
* @pre: the last vGPU that owns the engine * @pre: the last vGPU that owns the engine
* @next: the vGPU to switch to * @next: the vGPU to switch to
* @ring_id: specify the engine * @engine: the engine
* *
* If pre is null, it indicates that the host owns the engine. If next * If pre is null, it indicates that the host owns the engine. If next
* is null, it indicates that we are switching to a host workload. * is null, it indicates that we are switching to a host workload.
*/ */
void intel_gvt_switch_mmio(struct intel_vgpu *pre, void intel_gvt_switch_mmio(struct intel_vgpu *pre,
struct intel_vgpu *next, int ring_id) struct intel_vgpu *next,
const struct intel_engine_cs *engine)
{ {
struct drm_i915_private *dev_priv; if (WARN(!pre && !next, "switch ring %s from host to HOST\n",
engine->name))
if (WARN(!pre && !next, "switch ring %d from host to HOST\n", ring_id))
return; return;
gvt_dbg_render("switch ring %d from %s to %s\n", ring_id, gvt_dbg_render("switch ring %s from %s to %s\n", engine->name,
pre ? "vGPU" : "host", next ? "vGPU" : "HOST"); pre ? "vGPU" : "host", next ? "vGPU" : "HOST");
dev_priv = pre ? pre->gvt->dev_priv : next->gvt->dev_priv;
/** /**
* We are using a raw mmio access wrapper to improve the * We are using a raw mmio access wrapper to improve the
* performance of batch mmio read/write, so we need to * performance of batch mmio read/write, so we need to
* handle forcewake manually. * handle forcewake manually.
*/ */
intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL); intel_uncore_forcewake_get(engine->uncore, FORCEWAKE_ALL);
switch_mmio(pre, next, ring_id); switch_mmio(pre, next, engine);
intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); intel_uncore_forcewake_put(engine->uncore, FORCEWAKE_ALL);
} }
/** /**
...@@ -580,7 +579,7 @@ void intel_gvt_init_engine_mmio_context(struct intel_gvt *gvt) ...@@ -580,7 +579,7 @@ void intel_gvt_init_engine_mmio_context(struct intel_gvt *gvt)
{ {
struct engine_mmio *mmio; struct engine_mmio *mmio;
if (INTEL_GEN(gvt->dev_priv) >= 9) { if (INTEL_GEN(gvt->gt->i915) >= 9) {
gvt->engine_mmio_list.mmio = gen9_engine_mmio_list; gvt->engine_mmio_list.mmio = gen9_engine_mmio_list;
gvt->engine_mmio_list.tlb_mmio_offset_list = gen8_tlb_mmio_offset_list; gvt->engine_mmio_list.tlb_mmio_offset_list = gen8_tlb_mmio_offset_list;
gvt->engine_mmio_list.tlb_mmio_offset_list_cnt = ARRAY_SIZE(gen8_tlb_mmio_offset_list); gvt->engine_mmio_list.tlb_mmio_offset_list_cnt = ARRAY_SIZE(gen8_tlb_mmio_offset_list);
...@@ -595,7 +594,7 @@ void intel_gvt_init_engine_mmio_context(struct intel_gvt *gvt) ...@@ -595,7 +594,7 @@ void intel_gvt_init_engine_mmio_context(struct intel_gvt *gvt)
for (mmio = gvt->engine_mmio_list.mmio; for (mmio = gvt->engine_mmio_list.mmio;
i915_mmio_reg_valid(mmio->reg); mmio++) { i915_mmio_reg_valid(mmio->reg); mmio++) {
if (mmio->in_context) { if (mmio->in_context) {
gvt->engine_mmio_list.ctx_mmio_count[mmio->ring_id]++; gvt->engine_mmio_list.ctx_mmio_count[mmio->id]++;
intel_gvt_mmio_set_in_ctx(gvt, mmio->reg.reg); intel_gvt_mmio_set_in_ctx(gvt, mmio->reg.reg);
} }
} }
......
...@@ -37,7 +37,7 @@ ...@@ -37,7 +37,7 @@
#define __GVT_RENDER_H__ #define __GVT_RENDER_H__
struct engine_mmio { struct engine_mmio {
int ring_id; enum intel_engine_id id;
i915_reg_t reg; i915_reg_t reg;
u32 mask; u32 mask;
bool in_context; bool in_context;
...@@ -45,7 +45,8 @@ struct engine_mmio { ...@@ -45,7 +45,8 @@ struct engine_mmio {
}; };
void intel_gvt_switch_mmio(struct intel_vgpu *pre, void intel_gvt_switch_mmio(struct intel_vgpu *pre,
struct intel_vgpu *next, int ring_id); struct intel_vgpu *next,
const struct intel_engine_cs *engine);
void intel_gvt_init_engine_mmio_context(struct intel_gvt *gvt); void intel_gvt_init_engine_mmio_context(struct intel_gvt *gvt);
......
...@@ -39,8 +39,8 @@ static bool vgpu_has_pending_workload(struct intel_vgpu *vgpu) ...@@ -39,8 +39,8 @@ static bool vgpu_has_pending_workload(struct intel_vgpu *vgpu)
enum intel_engine_id i; enum intel_engine_id i;
struct intel_engine_cs *engine; struct intel_engine_cs *engine;
for_each_engine(engine, vgpu->gvt->dev_priv, i) { for_each_engine(engine, vgpu->gvt->gt, i) {
if (!list_empty(workload_q_head(vgpu, i))) if (!list_empty(workload_q_head(vgpu, engine)))
return true; return true;
} }
...@@ -152,8 +152,8 @@ static void try_to_schedule_next_vgpu(struct intel_gvt *gvt) ...@@ -152,8 +152,8 @@ static void try_to_schedule_next_vgpu(struct intel_gvt *gvt)
scheduler->need_reschedule = true; scheduler->need_reschedule = true;
/* still have uncompleted workload? */ /* still have uncompleted workload? */
for_each_engine(engine, gvt->dev_priv, i) { for_each_engine(engine, gvt->gt, i) {
if (scheduler->current_workload[i]) if (scheduler->current_workload[engine->id])
return; return;
} }
...@@ -169,8 +169,8 @@ static void try_to_schedule_next_vgpu(struct intel_gvt *gvt) ...@@ -169,8 +169,8 @@ static void try_to_schedule_next_vgpu(struct intel_gvt *gvt)
scheduler->need_reschedule = false; scheduler->need_reschedule = false;
/* wake up workload dispatch thread */ /* wake up workload dispatch thread */
for_each_engine(engine, gvt->dev_priv, i) for_each_engine(engine, gvt->gt, i)
wake_up(&scheduler->waitq[i]); wake_up(&scheduler->waitq[engine->id]);
} }
static struct intel_vgpu *find_busy_vgpu(struct gvt_sched_data *sched_data) static struct intel_vgpu *find_busy_vgpu(struct gvt_sched_data *sched_data)
...@@ -444,9 +444,10 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu) ...@@ -444,9 +444,10 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
{ {
struct intel_gvt_workload_scheduler *scheduler = struct intel_gvt_workload_scheduler *scheduler =
&vgpu->gvt->scheduler; &vgpu->gvt->scheduler;
int ring_id;
struct vgpu_sched_data *vgpu_data = vgpu->sched_data; struct vgpu_sched_data *vgpu_data = vgpu->sched_data;
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
struct intel_engine_cs *engine;
enum intel_engine_id id;
if (!vgpu_data->active) if (!vgpu_data->active)
return; return;
...@@ -467,10 +468,10 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu) ...@@ -467,10 +468,10 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
intel_runtime_pm_get(&dev_priv->runtime_pm); intel_runtime_pm_get(&dev_priv->runtime_pm);
spin_lock_bh(&scheduler->mmio_context_lock); spin_lock_bh(&scheduler->mmio_context_lock);
for (ring_id = 0; ring_id < I915_NUM_ENGINES; ring_id++) { for_each_engine(engine, vgpu->gvt->gt, id) {
if (scheduler->engine_owner[ring_id] == vgpu) { if (scheduler->engine_owner[engine->id] == vgpu) {
intel_gvt_switch_mmio(vgpu, NULL, ring_id); intel_gvt_switch_mmio(vgpu, NULL, engine);
scheduler->engine_owner[ring_id] = NULL; scheduler->engine_owner[engine->id] = NULL;
} }
} }
spin_unlock_bh(&scheduler->mmio_context_lock); spin_unlock_bh(&scheduler->mmio_context_lock);
......
...@@ -84,7 +84,7 @@ static void update_shadow_pdps(struct intel_vgpu_workload *workload) ...@@ -84,7 +84,7 @@ static void update_shadow_pdps(struct intel_vgpu_workload *workload)
static void sr_oa_regs(struct intel_vgpu_workload *workload, static void sr_oa_regs(struct intel_vgpu_workload *workload,
u32 *reg_state, bool save) u32 *reg_state, bool save)
{ {
struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv; struct drm_i915_private *dev_priv = workload->vgpu->gvt->gt->i915;
u32 ctx_oactxctrl = dev_priv->perf.ctx_oactxctrl_offset; u32 ctx_oactxctrl = dev_priv->perf.ctx_oactxctrl_offset;
u32 ctx_flexeu0 = dev_priv->perf.ctx_flexeu0_offset; u32 ctx_flexeu0 = dev_priv->perf.ctx_flexeu0_offset;
int i = 0; int i = 0;
...@@ -98,7 +98,7 @@ static void sr_oa_regs(struct intel_vgpu_workload *workload, ...@@ -98,7 +98,7 @@ static void sr_oa_regs(struct intel_vgpu_workload *workload,
i915_mmio_reg_offset(EU_PERF_CNTL6), i915_mmio_reg_offset(EU_PERF_CNTL6),
}; };
if (workload->ring_id != RCS0) if (workload->engine->id != RCS0)
return; return;
if (save) { if (save) {
...@@ -128,7 +128,6 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload) ...@@ -128,7 +128,6 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
{ {
struct intel_vgpu *vgpu = workload->vgpu; struct intel_vgpu *vgpu = workload->vgpu;
struct intel_gvt *gvt = vgpu->gvt; struct intel_gvt *gvt = vgpu->gvt;
int ring_id = workload->ring_id;
struct drm_i915_gem_object *ctx_obj = struct drm_i915_gem_object *ctx_obj =
workload->req->context->state->obj; workload->req->context->state->obj;
struct execlist_ring_context *shadow_ring_context; struct execlist_ring_context *shadow_ring_context;
...@@ -154,7 +153,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload) ...@@ -154,7 +153,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
COPY_REG_MASKED(ctx_ctrl); COPY_REG_MASKED(ctx_ctrl);
COPY_REG(ctx_timestamp); COPY_REG(ctx_timestamp);
if (ring_id == RCS0) { if (workload->engine->id == RCS0) {
COPY_REG(bb_per_ctx_ptr); COPY_REG(bb_per_ctx_ptr);
COPY_REG(rcs_indirect_ctx); COPY_REG(rcs_indirect_ctx);
COPY_REG(rcs_indirect_ctx_offset); COPY_REG(rcs_indirect_ctx_offset);
...@@ -175,14 +174,14 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload) ...@@ -175,14 +174,14 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
if (IS_RESTORE_INHIBIT(shadow_ring_context->ctx_ctrl.val)) if (IS_RESTORE_INHIBIT(shadow_ring_context->ctx_ctrl.val))
return 0; return 0;
gvt_dbg_sched("ring id %d workload lrca %x", ring_id, gvt_dbg_sched("ring %s workload lrca %x",
workload->ctx_desc.lrca); workload->engine->name,
workload->ctx_desc.lrca);
context_page_num = gvt->dev_priv->engine[ring_id]->context_size;
context_page_num = workload->engine->context_size;
context_page_num = context_page_num >> PAGE_SHIFT; context_page_num = context_page_num >> PAGE_SHIFT;
if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS0) if (IS_BROADWELL(gvt->gt->i915) && workload->engine->id == RCS0)
context_page_num = 19; context_page_num = 19;
i = 2; i = 2;
...@@ -210,38 +209,43 @@ static inline bool is_gvt_request(struct i915_request *rq) ...@@ -210,38 +209,43 @@ static inline bool is_gvt_request(struct i915_request *rq)
return intel_context_force_single_submission(rq->context); return intel_context_force_single_submission(rq->context);
} }
static void save_ring_hw_state(struct intel_vgpu *vgpu, int ring_id) static void save_ring_hw_state(struct intel_vgpu *vgpu,
const struct intel_engine_cs *engine)
{ {
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; struct intel_uncore *uncore = engine->uncore;
u32 ring_base = dev_priv->engine[ring_id]->mmio_base;
i915_reg_t reg; i915_reg_t reg;
reg = RING_INSTDONE(ring_base); reg = RING_INSTDONE(engine->mmio_base);
vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = I915_READ_FW(reg); vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) =
reg = RING_ACTHD(ring_base); intel_uncore_read(uncore, reg);
vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = I915_READ_FW(reg);
reg = RING_ACTHD_UDW(ring_base); reg = RING_ACTHD(engine->mmio_base);
vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = I915_READ_FW(reg); vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) =
intel_uncore_read(uncore, reg);
reg = RING_ACTHD_UDW(engine->mmio_base);
vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) =
intel_uncore_read(uncore, reg);
} }
static int shadow_context_status_change(struct notifier_block *nb, static int shadow_context_status_change(struct notifier_block *nb,
unsigned long action, void *data) unsigned long action, void *data)
{ {
struct i915_request *req = data; struct i915_request *rq = data;
struct intel_gvt *gvt = container_of(nb, struct intel_gvt, struct intel_gvt *gvt = container_of(nb, struct intel_gvt,
shadow_ctx_notifier_block[req->engine->id]); shadow_ctx_notifier_block[rq->engine->id]);
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
enum intel_engine_id ring_id = req->engine->id; enum intel_engine_id ring_id = rq->engine->id;
struct intel_vgpu_workload *workload; struct intel_vgpu_workload *workload;
unsigned long flags; unsigned long flags;
if (!is_gvt_request(req)) { if (!is_gvt_request(rq)) {
spin_lock_irqsave(&scheduler->mmio_context_lock, flags); spin_lock_irqsave(&scheduler->mmio_context_lock, flags);
if (action == INTEL_CONTEXT_SCHEDULE_IN && if (action == INTEL_CONTEXT_SCHEDULE_IN &&
scheduler->engine_owner[ring_id]) { scheduler->engine_owner[ring_id]) {
/* Switch ring from vGPU to host. */ /* Switch ring from vGPU to host. */
intel_gvt_switch_mmio(scheduler->engine_owner[ring_id], intel_gvt_switch_mmio(scheduler->engine_owner[ring_id],
NULL, ring_id); NULL, rq->engine);
scheduler->engine_owner[ring_id] = NULL; scheduler->engine_owner[ring_id] = NULL;
} }
spin_unlock_irqrestore(&scheduler->mmio_context_lock, flags); spin_unlock_irqrestore(&scheduler->mmio_context_lock, flags);
...@@ -259,7 +263,7 @@ static int shadow_context_status_change(struct notifier_block *nb, ...@@ -259,7 +263,7 @@ static int shadow_context_status_change(struct notifier_block *nb,
if (workload->vgpu != scheduler->engine_owner[ring_id]) { if (workload->vgpu != scheduler->engine_owner[ring_id]) {
/* Switch ring from host to vGPU or vGPU to vGPU. */ /* Switch ring from host to vGPU or vGPU to vGPU. */
intel_gvt_switch_mmio(scheduler->engine_owner[ring_id], intel_gvt_switch_mmio(scheduler->engine_owner[ring_id],
workload->vgpu, ring_id); workload->vgpu, rq->engine);
scheduler->engine_owner[ring_id] = workload->vgpu; scheduler->engine_owner[ring_id] = workload->vgpu;
} else } else
gvt_dbg_sched("skip ring %d mmio switch for vgpu%d\n", gvt_dbg_sched("skip ring %d mmio switch for vgpu%d\n",
...@@ -268,11 +272,11 @@ static int shadow_context_status_change(struct notifier_block *nb, ...@@ -268,11 +272,11 @@ static int shadow_context_status_change(struct notifier_block *nb,
atomic_set(&workload->shadow_ctx_active, 1); atomic_set(&workload->shadow_ctx_active, 1);
break; break;
case INTEL_CONTEXT_SCHEDULE_OUT: case INTEL_CONTEXT_SCHEDULE_OUT:
save_ring_hw_state(workload->vgpu, ring_id); save_ring_hw_state(workload->vgpu, rq->engine);
atomic_set(&workload->shadow_ctx_active, 0); atomic_set(&workload->shadow_ctx_active, 0);
break; break;
case INTEL_CONTEXT_SCHEDULE_PREEMPTED: case INTEL_CONTEXT_SCHEDULE_PREEMPTED:
save_ring_hw_state(workload->vgpu, ring_id); save_ring_hw_state(workload->vgpu, rq->engine);
break; break;
default: default:
WARN_ON(1); WARN_ON(1);
...@@ -391,7 +395,7 @@ intel_gvt_workload_req_alloc(struct intel_vgpu_workload *workload) ...@@ -391,7 +395,7 @@ intel_gvt_workload_req_alloc(struct intel_vgpu_workload *workload)
if (workload->req) if (workload->req)
return 0; return 0;
rq = i915_request_create(s->shadow[workload->ring_id]); rq = i915_request_create(s->shadow[workload->engine->id]);
if (IS_ERR(rq)) { if (IS_ERR(rq)) {
gvt_vgpu_err("fail to allocate gem request\n"); gvt_vgpu_err("fail to allocate gem request\n");
return PTR_ERR(rq); return PTR_ERR(rq);
...@@ -420,15 +424,16 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload) ...@@ -420,15 +424,16 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
if (workload->shadow) if (workload->shadow)
return 0; return 0;
if (!test_and_set_bit(workload->ring_id, s->shadow_ctx_desc_updated)) if (!test_and_set_bit(workload->engine->id, s->shadow_ctx_desc_updated))
shadow_context_descriptor_update(s->shadow[workload->ring_id], shadow_context_descriptor_update(s->shadow[workload->engine->id],
workload); workload);
ret = intel_gvt_scan_and_shadow_ringbuffer(workload); ret = intel_gvt_scan_and_shadow_ringbuffer(workload);
if (ret) if (ret)
return ret; return ret;
if (workload->ring_id == RCS0 && workload->wa_ctx.indirect_ctx.size) { if (workload->engine->id == RCS0 &&
workload->wa_ctx.indirect_ctx.size) {
ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx); ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx);
if (ret) if (ret)
goto err_shadow; goto err_shadow;
...@@ -436,6 +441,7 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload) ...@@ -436,6 +441,7 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
workload->shadow = true; workload->shadow = true;
return 0; return 0;
err_shadow: err_shadow:
release_shadow_wa_ctx(&workload->wa_ctx); release_shadow_wa_ctx(&workload->wa_ctx);
return ret; return ret;
...@@ -567,12 +573,8 @@ static int prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx) ...@@ -567,12 +573,8 @@ static int prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
static void update_vreg_in_ctx(struct intel_vgpu_workload *workload) static void update_vreg_in_ctx(struct intel_vgpu_workload *workload)
{ {
struct intel_vgpu *vgpu = workload->vgpu; vgpu_vreg_t(workload->vgpu, RING_START(workload->engine->mmio_base)) =
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; workload->rb_start;
u32 ring_base;
ring_base = dev_priv->engine[workload->ring_id]->mmio_base;
vgpu_vreg_t(vgpu, RING_START(ring_base)) = workload->rb_start;
} }
static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload) static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
...@@ -608,7 +610,6 @@ static int prepare_workload(struct intel_vgpu_workload *workload) ...@@ -608,7 +610,6 @@ static int prepare_workload(struct intel_vgpu_workload *workload)
{ {
struct intel_vgpu *vgpu = workload->vgpu; struct intel_vgpu *vgpu = workload->vgpu;
struct intel_vgpu_submission *s = &vgpu->submission; struct intel_vgpu_submission *s = &vgpu->submission;
int ring = workload->ring_id;
int ret = 0; int ret = 0;
ret = intel_vgpu_pin_mm(workload->shadow_mm); ret = intel_vgpu_pin_mm(workload->shadow_mm);
...@@ -625,7 +626,7 @@ static int prepare_workload(struct intel_vgpu_workload *workload) ...@@ -625,7 +626,7 @@ static int prepare_workload(struct intel_vgpu_workload *workload)
update_shadow_pdps(workload); update_shadow_pdps(workload);
set_context_ppgtt_from_shadow(workload, s->shadow[ring]); set_context_ppgtt_from_shadow(workload, s->shadow[workload->engine->id]);
ret = intel_vgpu_sync_oos_pages(workload->vgpu); ret = intel_vgpu_sync_oos_pages(workload->vgpu);
if (ret) { if (ret) {
...@@ -677,11 +678,10 @@ static int dispatch_workload(struct intel_vgpu_workload *workload) ...@@ -677,11 +678,10 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
{ {
struct intel_vgpu *vgpu = workload->vgpu; struct intel_vgpu *vgpu = workload->vgpu;
struct i915_request *rq; struct i915_request *rq;
int ring_id = workload->ring_id;
int ret; int ret;
gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n", gvt_dbg_sched("ring id %s prepare to dispatch workload %p\n",
ring_id, workload); workload->engine->name, workload);
mutex_lock(&vgpu->vgpu_lock); mutex_lock(&vgpu->vgpu_lock);
...@@ -710,8 +710,8 @@ static int dispatch_workload(struct intel_vgpu_workload *workload) ...@@ -710,8 +710,8 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
} }
if (!IS_ERR_OR_NULL(workload->req)) { if (!IS_ERR_OR_NULL(workload->req)) {
gvt_dbg_sched("ring id %d submit workload to i915 %p\n", gvt_dbg_sched("ring id %s submit workload to i915 %p\n",
ring_id, workload->req); workload->engine->name, workload->req);
i915_request_add(workload->req); i915_request_add(workload->req);
workload->dispatched = true; workload->dispatched = true;
} }
...@@ -722,8 +722,8 @@ static int dispatch_workload(struct intel_vgpu_workload *workload) ...@@ -722,8 +722,8 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
return ret; return ret;
} }
static struct intel_vgpu_workload *pick_next_workload( static struct intel_vgpu_workload *
struct intel_gvt *gvt, int ring_id) pick_next_workload(struct intel_gvt *gvt, struct intel_engine_cs *engine)
{ {
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
struct intel_vgpu_workload *workload = NULL; struct intel_vgpu_workload *workload = NULL;
...@@ -735,27 +735,27 @@ static struct intel_vgpu_workload *pick_next_workload( ...@@ -735,27 +735,27 @@ static struct intel_vgpu_workload *pick_next_workload(
* bail out * bail out
*/ */
if (!scheduler->current_vgpu) { if (!scheduler->current_vgpu) {
gvt_dbg_sched("ring id %d stop - no current vgpu\n", ring_id); gvt_dbg_sched("ring %s stop - no current vgpu\n", engine->name);
goto out; goto out;
} }
if (scheduler->need_reschedule) { if (scheduler->need_reschedule) {
gvt_dbg_sched("ring id %d stop - will reschedule\n", ring_id); gvt_dbg_sched("ring %s stop - will reschedule\n", engine->name);
goto out; goto out;
} }
if (!scheduler->current_vgpu->active || if (!scheduler->current_vgpu->active ||
list_empty(workload_q_head(scheduler->current_vgpu, ring_id))) list_empty(workload_q_head(scheduler->current_vgpu, engine)))
goto out; goto out;
/* /*
* still have a current workload, maybe the workload dispatcher * still have a current workload, maybe the workload dispatcher
* failed to submit it for some reason, resubmit it. * failed to submit it for some reason, resubmit it.
*/ */
if (scheduler->current_workload[ring_id]) { if (scheduler->current_workload[engine->id]) {
workload = scheduler->current_workload[ring_id]; workload = scheduler->current_workload[engine->id];
gvt_dbg_sched("ring id %d still have current workload %p\n", gvt_dbg_sched("ring %s still have current workload %p\n",
ring_id, workload); engine->name, workload);
goto out; goto out;
} }
...@@ -765,13 +765,14 @@ static struct intel_vgpu_workload *pick_next_workload( ...@@ -765,13 +765,14 @@ static struct intel_vgpu_workload *pick_next_workload(
* will wait until the current workload is finished when trying to * will wait until the current workload is finished when trying to
* schedule out a vgpu. * schedule out a vgpu.
*/ */
scheduler->current_workload[ring_id] = container_of( scheduler->current_workload[engine->id] =
workload_q_head(scheduler->current_vgpu, ring_id)->next, list_first_entry(workload_q_head(scheduler->current_vgpu,
struct intel_vgpu_workload, list); engine),
struct intel_vgpu_workload, list);
workload = scheduler->current_workload[ring_id]; workload = scheduler->current_workload[engine->id];
gvt_dbg_sched("ring id %d pick new workload %p\n", ring_id, workload); gvt_dbg_sched("ring %s pick new workload %p\n", engine->name, workload);
atomic_inc(&workload->vgpu->submission.running_workload_num); atomic_inc(&workload->vgpu->submission.running_workload_num);
out: out:
...@@ -783,14 +784,12 @@ static void update_guest_context(struct intel_vgpu_workload *workload) ...@@ -783,14 +784,12 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
{ {
struct i915_request *rq = workload->req; struct i915_request *rq = workload->req;
struct intel_vgpu *vgpu = workload->vgpu; struct intel_vgpu *vgpu = workload->vgpu;
struct intel_gvt *gvt = vgpu->gvt;
struct drm_i915_gem_object *ctx_obj = rq->context->state->obj; struct drm_i915_gem_object *ctx_obj = rq->context->state->obj;
struct execlist_ring_context *shadow_ring_context; struct execlist_ring_context *shadow_ring_context;
struct page *page; struct page *page;
void *src; void *src;
unsigned long context_gpa, context_page_num; unsigned long context_gpa, context_page_num;
int i; int i;
struct drm_i915_private *dev_priv = gvt->dev_priv;
u32 ring_base; u32 ring_base;
u32 head, tail; u32 head, tail;
u16 wrap_count; u16 wrap_count;
...@@ -811,14 +810,14 @@ static void update_guest_context(struct intel_vgpu_workload *workload) ...@@ -811,14 +810,14 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
head = (wrap_count << RB_HEAD_WRAP_CNT_OFF) | tail; head = (wrap_count << RB_HEAD_WRAP_CNT_OFF) | tail;
ring_base = dev_priv->engine[workload->ring_id]->mmio_base; ring_base = rq->engine->mmio_base;
vgpu_vreg_t(vgpu, RING_TAIL(ring_base)) = tail; vgpu_vreg_t(vgpu, RING_TAIL(ring_base)) = tail;
vgpu_vreg_t(vgpu, RING_HEAD(ring_base)) = head; vgpu_vreg_t(vgpu, RING_HEAD(ring_base)) = head;
context_page_num = rq->engine->context_size; context_page_num = rq->engine->context_size;
context_page_num = context_page_num >> PAGE_SHIFT; context_page_num = context_page_num >> PAGE_SHIFT;
if (IS_BROADWELL(gvt->dev_priv) && rq->engine->id == RCS0) if (IS_BROADWELL(rq->i915) && rq->engine->id == RCS0)
context_page_num = 19; context_page_num = 19;
i = 2; i = 2;
...@@ -869,7 +868,7 @@ void intel_vgpu_clean_workloads(struct intel_vgpu *vgpu, ...@@ -869,7 +868,7 @@ void intel_vgpu_clean_workloads(struct intel_vgpu *vgpu,
intel_engine_mask_t engine_mask) intel_engine_mask_t engine_mask)
{ {
struct intel_vgpu_submission *s = &vgpu->submission; struct intel_vgpu_submission *s = &vgpu->submission;
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
struct intel_engine_cs *engine; struct intel_engine_cs *engine;
struct intel_vgpu_workload *pos, *n; struct intel_vgpu_workload *pos, *n;
intel_engine_mask_t tmp; intel_engine_mask_t tmp;
...@@ -966,54 +965,47 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id) ...@@ -966,54 +965,47 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
mutex_unlock(&vgpu->vgpu_lock); mutex_unlock(&vgpu->vgpu_lock);
} }
struct workload_thread_param { static int workload_thread(void *arg)
struct intel_gvt *gvt;
int ring_id;
};
static int workload_thread(void *priv)
{ {
struct workload_thread_param *p = (struct workload_thread_param *)priv; struct intel_engine_cs *engine = arg;
struct intel_gvt *gvt = p->gvt; const bool need_force_wake = INTEL_GEN(engine->i915) >= 9;
int ring_id = p->ring_id; struct intel_gvt *gvt = engine->i915->gvt;
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
struct intel_vgpu_workload *workload = NULL; struct intel_vgpu_workload *workload = NULL;
struct intel_vgpu *vgpu = NULL; struct intel_vgpu *vgpu = NULL;
int ret; int ret;
bool need_force_wake = (INTEL_GEN(gvt->dev_priv) >= 9);
DEFINE_WAIT_FUNC(wait, woken_wake_function); DEFINE_WAIT_FUNC(wait, woken_wake_function);
struct intel_runtime_pm *rpm = &gvt->dev_priv->runtime_pm;
kfree(p);
gvt_dbg_core("workload thread for ring %d started\n", ring_id); gvt_dbg_core("workload thread for ring %s started\n", engine->name);
while (!kthread_should_stop()) { while (!kthread_should_stop()) {
add_wait_queue(&scheduler->waitq[ring_id], &wait); intel_wakeref_t wakeref;
add_wait_queue(&scheduler->waitq[engine->id], &wait);
do { do {
workload = pick_next_workload(gvt, ring_id); workload = pick_next_workload(gvt, engine);
if (workload) if (workload)
break; break;
wait_woken(&wait, TASK_INTERRUPTIBLE, wait_woken(&wait, TASK_INTERRUPTIBLE,
MAX_SCHEDULE_TIMEOUT); MAX_SCHEDULE_TIMEOUT);
} while (!kthread_should_stop()); } while (!kthread_should_stop());
remove_wait_queue(&scheduler->waitq[ring_id], &wait); remove_wait_queue(&scheduler->waitq[engine->id], &wait);
if (!workload) if (!workload)
break; break;
gvt_dbg_sched("ring id %d next workload %p vgpu %d\n", gvt_dbg_sched("ring %s next workload %p vgpu %d\n",
workload->ring_id, workload, engine->name, workload,
workload->vgpu->id); workload->vgpu->id);
intel_runtime_pm_get(rpm); wakeref = intel_runtime_pm_get(engine->uncore->rpm);
gvt_dbg_sched("ring id %d will dispatch workload %p\n", gvt_dbg_sched("ring %s will dispatch workload %p\n",
workload->ring_id, workload); engine->name, workload);
if (need_force_wake) if (need_force_wake)
intel_uncore_forcewake_get(&gvt->dev_priv->uncore, intel_uncore_forcewake_get(engine->uncore,
FORCEWAKE_ALL); FORCEWAKE_ALL);
/* /*
* Update the vReg of the vGPU which submitted this * Update the vReg of the vGPU which submitted this
* workload. The vGPU may use these registers for checking * workload. The vGPU may use these registers for checking
...@@ -1030,21 +1022,21 @@ static int workload_thread(void *priv) ...@@ -1030,21 +1022,21 @@ static int workload_thread(void *priv)
goto complete; goto complete;
} }
gvt_dbg_sched("ring id %d wait workload %p\n", gvt_dbg_sched("ring %s wait workload %p\n",
workload->ring_id, workload); engine->name, workload);
i915_request_wait(workload->req, 0, MAX_SCHEDULE_TIMEOUT); i915_request_wait(workload->req, 0, MAX_SCHEDULE_TIMEOUT);
complete: complete:
gvt_dbg_sched("will complete workload %p, status: %d\n", gvt_dbg_sched("will complete workload %p, status: %d\n",
workload, workload->status); workload, workload->status);
complete_current_workload(gvt, ring_id); complete_current_workload(gvt, engine->id);
if (need_force_wake) if (need_force_wake)
intel_uncore_forcewake_put(&gvt->dev_priv->uncore, intel_uncore_forcewake_put(engine->uncore,
FORCEWAKE_ALL); FORCEWAKE_ALL);
intel_runtime_pm_put_unchecked(rpm); intel_runtime_pm_put(engine->uncore->rpm, wakeref);
if (ret && (vgpu_is_vm_unhealthy(ret))) if (ret && (vgpu_is_vm_unhealthy(ret)))
enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR); enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
} }
...@@ -1073,7 +1065,7 @@ void intel_gvt_clean_workload_scheduler(struct intel_gvt *gvt) ...@@ -1073,7 +1065,7 @@ void intel_gvt_clean_workload_scheduler(struct intel_gvt *gvt)
gvt_dbg_core("clean workload scheduler\n"); gvt_dbg_core("clean workload scheduler\n");
for_each_engine(engine, gvt->dev_priv, i) { for_each_engine(engine, gvt->gt, i) {
atomic_notifier_chain_unregister( atomic_notifier_chain_unregister(
&engine->context_status_notifier, &engine->context_status_notifier,
&gvt->shadow_ctx_notifier_block[i]); &gvt->shadow_ctx_notifier_block[i]);
...@@ -1084,7 +1076,6 @@ void intel_gvt_clean_workload_scheduler(struct intel_gvt *gvt) ...@@ -1084,7 +1076,6 @@ void intel_gvt_clean_workload_scheduler(struct intel_gvt *gvt)
int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt) int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt)
{ {
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
struct workload_thread_param *param = NULL;
struct intel_engine_cs *engine; struct intel_engine_cs *engine;
enum intel_engine_id i; enum intel_engine_id i;
int ret; int ret;
...@@ -1093,20 +1084,11 @@ int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt) ...@@ -1093,20 +1084,11 @@ int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt)
init_waitqueue_head(&scheduler->workload_complete_wq); init_waitqueue_head(&scheduler->workload_complete_wq);
for_each_engine(engine, gvt->dev_priv, i) { for_each_engine(engine, gvt->gt, i) {
init_waitqueue_head(&scheduler->waitq[i]); init_waitqueue_head(&scheduler->waitq[i]);
param = kzalloc(sizeof(*param), GFP_KERNEL); scheduler->thread[i] = kthread_run(workload_thread, engine,
if (!param) { "gvt:%s", engine->name);
ret = -ENOMEM;
goto err;
}
param->gvt = gvt;
param->ring_id = i;
scheduler->thread[i] = kthread_run(workload_thread, param,
"gvt workload %d", i);
if (IS_ERR(scheduler->thread[i])) { if (IS_ERR(scheduler->thread[i])) {
gvt_err("fail to create workload thread\n"); gvt_err("fail to create workload thread\n");
ret = PTR_ERR(scheduler->thread[i]); ret = PTR_ERR(scheduler->thread[i]);
...@@ -1118,11 +1100,11 @@ int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt) ...@@ -1118,11 +1100,11 @@ int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt)
atomic_notifier_chain_register(&engine->context_status_notifier, atomic_notifier_chain_register(&engine->context_status_notifier,
&gvt->shadow_ctx_notifier_block[i]); &gvt->shadow_ctx_notifier_block[i]);
} }
return 0; return 0;
err: err:
intel_gvt_clean_workload_scheduler(gvt); intel_gvt_clean_workload_scheduler(gvt);
kfree(param);
param = NULL;
return ret; return ret;
} }
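
As the hunk above shows, the per-ring workload_thread_param allocation is gone: the engine itself is passed as the kthread's private data, and the thread is named after the engine. A sketch of how the thread side can then recover its context from that single pointer (assuming, as elsewhere in i915, that the gvt instance is reachable via engine->i915->gvt; only the entry of the function is shown):

	static int workload_thread(void *priv)
	{
		/* The kthread data is the engine itself, not a param struct. */
		struct intel_engine_cs *engine = priv;
		struct intel_gvt *gvt = engine->i915->gvt;
		struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
		...
	}
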
...@@ -1160,7 +1142,7 @@ void intel_vgpu_clean_submission(struct intel_vgpu *vgpu) ...@@ -1160,7 +1142,7 @@ void intel_vgpu_clean_submission(struct intel_vgpu *vgpu)
intel_vgpu_select_submission_ops(vgpu, ALL_ENGINES, 0); intel_vgpu_select_submission_ops(vgpu, ALL_ENGINES, 0);
i915_context_ppgtt_root_restore(s, i915_vm_to_ppgtt(s->shadow[0]->vm)); i915_context_ppgtt_root_restore(s, i915_vm_to_ppgtt(s->shadow[0]->vm));
for_each_engine(engine, vgpu->gvt->dev_priv, id) for_each_engine(engine, vgpu->gvt->gt, id)
intel_context_unpin(s->shadow[id]); intel_context_unpin(s->shadow[id]);
kmem_cache_destroy(s->workloads); kmem_cache_destroy(s->workloads);
...@@ -1217,7 +1199,7 @@ i915_context_ppgtt_root_save(struct intel_vgpu_submission *s, ...@@ -1217,7 +1199,7 @@ i915_context_ppgtt_root_save(struct intel_vgpu_submission *s,
*/ */
int intel_vgpu_setup_submission(struct intel_vgpu *vgpu) int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
{ {
struct drm_i915_private *i915 = vgpu->gvt->dev_priv; struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
struct intel_vgpu_submission *s = &vgpu->submission; struct intel_vgpu_submission *s = &vgpu->submission;
struct intel_engine_cs *engine; struct intel_engine_cs *engine;
struct i915_ppgtt *ppgtt; struct i915_ppgtt *ppgtt;
...@@ -1230,7 +1212,7 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu) ...@@ -1230,7 +1212,7 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
i915_context_ppgtt_root_save(s, ppgtt); i915_context_ppgtt_root_save(s, ppgtt);
for_each_engine(engine, i915, i) { for_each_engine(engine, vgpu->gvt->gt, i) {
struct intel_context *ce; struct intel_context *ce;
INIT_LIST_HEAD(&s->workload_q_head[i]); INIT_LIST_HEAD(&s->workload_q_head[i]);
...@@ -1283,7 +1265,7 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu) ...@@ -1283,7 +1265,7 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
out_shadow_ctx: out_shadow_ctx:
i915_context_ppgtt_root_restore(s, ppgtt); i915_context_ppgtt_root_restore(s, ppgtt);
for_each_engine(engine, i915, i) { for_each_engine(engine, vgpu->gvt->gt, i) {
if (IS_ERR(s->shadow[i])) if (IS_ERR(s->shadow[i]))
break; break;
...@@ -1310,7 +1292,7 @@ int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu, ...@@ -1310,7 +1292,7 @@ int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu,
intel_engine_mask_t engine_mask, intel_engine_mask_t engine_mask,
unsigned int interface) unsigned int interface)
{ {
struct drm_i915_private *i915 = vgpu->gvt->dev_priv; struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
struct intel_vgpu_submission *s = &vgpu->submission; struct intel_vgpu_submission *s = &vgpu->submission;
const struct intel_vgpu_submission_ops *ops[] = { const struct intel_vgpu_submission_ops *ops[] = {
[INTEL_VGPU_EXECLIST_SUBMISSION] = [INTEL_VGPU_EXECLIST_SUBMISSION] =
...@@ -1444,7 +1426,7 @@ static int prepare_mm(struct intel_vgpu_workload *workload) ...@@ -1444,7 +1426,7 @@ static int prepare_mm(struct intel_vgpu_workload *workload)
/** /**
* intel_vgpu_create_workload - create a vGPU workload * intel_vgpu_create_workload - create a vGPU workload
* @vgpu: a vGPU * @vgpu: a vGPU
* @ring_id: ring index * @engine: the engine
* @desc: a guest context descriptor * @desc: a guest context descriptor
* *
* This function is called when creating a vGPU workload. * This function is called when creating a vGPU workload.
...@@ -1455,14 +1437,14 @@ static int prepare_mm(struct intel_vgpu_workload *workload) ...@@ -1455,14 +1437,14 @@ static int prepare_mm(struct intel_vgpu_workload *workload)
* *
*/ */
struct intel_vgpu_workload * struct intel_vgpu_workload *
intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id, intel_vgpu_create_workload(struct intel_vgpu *vgpu,
const struct intel_engine_cs *engine,
struct execlist_ctx_descriptor_format *desc) struct execlist_ctx_descriptor_format *desc)
{ {
struct intel_vgpu_submission *s = &vgpu->submission; struct intel_vgpu_submission *s = &vgpu->submission;
struct list_head *q = workload_q_head(vgpu, ring_id); struct list_head *q = workload_q_head(vgpu, engine);
struct intel_vgpu_workload *last_workload = NULL; struct intel_vgpu_workload *last_workload = NULL;
struct intel_vgpu_workload *workload = NULL; struct intel_vgpu_workload *workload = NULL;
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
u64 ring_context_gpa; u64 ring_context_gpa;
u32 head, tail, start, ctl, ctx_ctl, per_ctx, indirect_ctx; u32 head, tail, start, ctl, ctx_ctl, per_ctx, indirect_ctx;
u32 guest_head; u32 guest_head;
...@@ -1489,10 +1471,10 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id, ...@@ -1489,10 +1471,10 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
list_for_each_entry_reverse(last_workload, q, list) { list_for_each_entry_reverse(last_workload, q, list) {
if (same_context(&last_workload->ctx_desc, desc)) { if (same_context(&last_workload->ctx_desc, desc)) {
gvt_dbg_el("ring id %d cur workload == last\n", gvt_dbg_el("ring %s cur workload == last\n",
ring_id); engine->name);
gvt_dbg_el("ctx head %x real head %lx\n", head, gvt_dbg_el("ctx head %x real head %lx\n", head,
last_workload->rb_tail); last_workload->rb_tail);
/* /*
* cannot use guest context head pointer here, * cannot use guest context head pointer here,
* as it might not be updated at this time * as it might not be updated at this time
...@@ -1502,7 +1484,7 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id, ...@@ -1502,7 +1484,7 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
} }
} }
gvt_dbg_el("ring id %d begin a new workload\n", ring_id); gvt_dbg_el("ring %s begin a new workload\n", engine->name);
/* record some ring buffer register values for scan and shadow */ /* record some ring buffer register values for scan and shadow */
intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa + intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
...@@ -1522,7 +1504,7 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id, ...@@ -1522,7 +1504,7 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
if (IS_ERR(workload)) if (IS_ERR(workload))
return workload; return workload;
workload->ring_id = ring_id; workload->engine = engine;
workload->ctx_desc = *desc; workload->ctx_desc = *desc;
workload->ring_context_gpa = ring_context_gpa; workload->ring_context_gpa = ring_context_gpa;
workload->rb_head = head; workload->rb_head = head;
...@@ -1531,7 +1513,7 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id, ...@@ -1531,7 +1513,7 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
workload->rb_start = start; workload->rb_start = start;
workload->rb_ctl = ctl; workload->rb_ctl = ctl;
if (ring_id == RCS0) { if (engine->id == RCS0) {
intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa + intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
RING_CTX_OFF(bb_per_ctx_ptr.val), &per_ctx, 4); RING_CTX_OFF(bb_per_ctx_ptr.val), &per_ctx, 4);
intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa + intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
...@@ -1569,8 +1551,8 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id, ...@@ -1569,8 +1551,8 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
} }
} }
gvt_dbg_el("workload %p ring id %d head %x tail %x start %x ctl %x\n", gvt_dbg_el("workload %p ring %s head %x tail %x start %x ctl %x\n",
workload, ring_id, head, tail, start, ctl); workload, engine->name, head, tail, start, ctl);
ret = prepare_mm(workload); ret = prepare_mm(workload);
if (ret) { if (ret) {
...@@ -1581,10 +1563,11 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id, ...@@ -1581,10 +1563,11 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
/* Only scan and shadow the first workload in the queue /* Only scan and shadow the first workload in the queue
* as there is only one pre-allocated buf-obj for shadow. * as there is only one pre-allocated buf-obj for shadow.
*/ */
if (list_empty(workload_q_head(vgpu, ring_id))) { if (list_empty(q)) {
intel_runtime_pm_get(&dev_priv->runtime_pm); intel_wakeref_t wakeref;
ret = intel_gvt_scan_and_shadow_workload(workload);
intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm); with_intel_runtime_pm(engine->gt->uncore->rpm, wakeref)
ret = intel_gvt_scan_and_shadow_workload(workload);
} }
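
The scan-and-shadow step in the hunk above now holds runtime PM through with_intel_runtime_pm(), which scopes a tracked wakeref to the single guarded statement instead of an explicit get/put_unchecked pair. A minimal sketch of the construct as used here:

	intel_wakeref_t wakeref;

	/* The wakeref is acquired on entry to the block and released on
	 * exit, so the GPU is kept awake only for the shadowing itself. */
	with_intel_runtime_pm(engine->gt->uncore->rpm, wakeref)
		ret = intel_gvt_scan_and_shadow_workload(workload);
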
if (ret) { if (ret) {
...@@ -1604,7 +1587,7 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id, ...@@ -1604,7 +1587,7 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
void intel_vgpu_queue_workload(struct intel_vgpu_workload *workload) void intel_vgpu_queue_workload(struct intel_vgpu_workload *workload)
{ {
list_add_tail(&workload->list, list_add_tail(&workload->list,
workload_q_head(workload->vgpu, workload->ring_id)); workload_q_head(workload->vgpu, workload->engine));
intel_gvt_kick_schedule(workload->vgpu->gvt); intel_gvt_kick_schedule(workload->vgpu->gvt);
wake_up(&workload->vgpu->gvt->scheduler.waitq[workload->ring_id]); wake_up(&workload->vgpu->gvt->scheduler.waitq[workload->engine->id]);
} }
...@@ -79,7 +79,7 @@ struct intel_shadow_wa_ctx { ...@@ -79,7 +79,7 @@ struct intel_shadow_wa_ctx {
struct intel_vgpu_workload { struct intel_vgpu_workload {
struct intel_vgpu *vgpu; struct intel_vgpu *vgpu;
int ring_id; const struct intel_engine_cs *engine;
struct i915_request *req; struct i915_request *req;
/* if this workload has been dispatched to i915? */ /* if this workload has been dispatched to i915? */
bool dispatched; bool dispatched;
...@@ -129,8 +129,8 @@ struct intel_vgpu_shadow_bb { ...@@ -129,8 +129,8 @@ struct intel_vgpu_shadow_bb {
bool ppgtt; bool ppgtt;
}; };
#define workload_q_head(vgpu, ring_id) \ #define workload_q_head(vgpu, e) \
(&(vgpu->submission.workload_q_head[ring_id])) (&(vgpu)->submission.workload_q_head[(e)->id])
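
With workload_q_head() now keyed on the engine rather than a raw ring id, call sites pass the intel_engine_cs pointer and the macro selects the per-engine queue by engine->id. A short usage sketch (vgpu, engine and workload assumed to be in scope):

	/* Expands to &(vgpu)->submission.workload_q_head[(engine)->id]. */
	struct list_head *q = workload_q_head(vgpu, engine);

	list_add_tail(&workload->list, q);
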
void intel_vgpu_queue_workload(struct intel_vgpu_workload *workload); void intel_vgpu_queue_workload(struct intel_vgpu_workload *workload);
...@@ -155,7 +155,8 @@ extern const struct intel_vgpu_submission_ops ...@@ -155,7 +155,8 @@ extern const struct intel_vgpu_submission_ops
intel_vgpu_execlist_submission_ops; intel_vgpu_execlist_submission_ops;
struct intel_vgpu_workload * struct intel_vgpu_workload *
intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id, intel_vgpu_create_workload(struct intel_vgpu *vgpu,
const struct intel_engine_cs *engine,
struct execlist_ctx_descriptor_format *desc); struct execlist_ctx_descriptor_format *desc);
void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload); void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload);
......
...@@ -37,7 +37,7 @@ ...@@ -37,7 +37,7 @@
void populate_pvinfo_page(struct intel_vgpu *vgpu) void populate_pvinfo_page(struct intel_vgpu *vgpu)
{ {
struct drm_i915_private *i915 = vgpu->gvt->dev_priv; struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
/* setup the ballooning information */ /* setup the ballooning information */
vgpu_vreg64_t(vgpu, vgtif_reg(magic)) = VGT_MAGIC; vgpu_vreg64_t(vgpu, vgtif_reg(magic)) = VGT_MAGIC;
vgpu_vreg_t(vgpu, vgtif_reg(version_major)) = 1; vgpu_vreg_t(vgpu, vgtif_reg(version_major)) = 1;
...@@ -149,12 +149,12 @@ int intel_gvt_init_vgpu_types(struct intel_gvt *gvt) ...@@ -149,12 +149,12 @@ int intel_gvt_init_vgpu_types(struct intel_gvt *gvt)
gvt->types[i].avail_instance = min(low_avail / vgpu_types[i].low_mm, gvt->types[i].avail_instance = min(low_avail / vgpu_types[i].low_mm,
high_avail / vgpu_types[i].high_mm); high_avail / vgpu_types[i].high_mm);
if (IS_GEN(gvt->dev_priv, 8)) if (IS_GEN(gvt->gt->i915, 8))
sprintf(gvt->types[i].name, "GVTg_V4_%s", sprintf(gvt->types[i].name, "GVTg_V4_%s",
vgpu_types[i].name); vgpu_types[i].name);
else if (IS_GEN(gvt->dev_priv, 9)) else if (IS_GEN(gvt->gt->i915, 9))
sprintf(gvt->types[i].name, "GVTg_V5_%s", sprintf(gvt->types[i].name, "GVTg_V5_%s",
vgpu_types[i].name); vgpu_types[i].name);
gvt_dbg_core("type[%d]: %s avail %u low %u high %u fence %u weight %u res %s\n", gvt_dbg_core("type[%d]: %s avail %u low %u high %u fence %u weight %u res %s\n",
i, gvt->types[i].name, i, gvt->types[i].name,
...@@ -271,8 +271,8 @@ void intel_gvt_release_vgpu(struct intel_vgpu *vgpu) ...@@ -271,8 +271,8 @@ void intel_gvt_release_vgpu(struct intel_vgpu *vgpu)
*/ */
void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu) void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
{ {
struct drm_i915_private *i915 = vgpu->gvt->dev_priv;
struct intel_gvt *gvt = vgpu->gvt; struct intel_gvt *gvt = vgpu->gvt;
struct drm_i915_private *i915 = gvt->gt->i915;
mutex_lock(&vgpu->vgpu_lock); mutex_lock(&vgpu->vgpu_lock);
......