Commit a61ac1e7 authored by Chris Wilson, committed by Zhenyu Wang

drm/i915/gvt: Wean gvt off using dev_priv

Teach gvt to use intel_gt directly as it currently assumes direct HW
access.

[Zhenyu: rebase, fix compiling]
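The conversion follows one pattern throughout: anything that touches hardware is reached through the intel_gt (gt->uncore for MMIO, gt->ggtt for the global GTT, gt->uncore->rpm for runtime PM), while software-only state is reached through the drm_i915_private behind gt->i915. A minimal before/after sketch distilled from the hunks below (reg and val are placeholders, not code from the patch):

        /* before: a bare dev_priv with implicit HW access */
        struct drm_i915_private *dev_priv = gvt->dev_priv;

        mutex_lock(&dev_priv->ggtt.vm.mutex);
        I915_WRITE(reg, val);                     /* implicit dev_priv */
        mutex_unlock(&dev_priv->ggtt.vm.mutex);

        /* after: HW access goes through the intel_gt it belongs to */
        struct intel_gt *gt = gvt->gt;

        mutex_lock(&gt->ggtt->vm.mutex);
        intel_uncore_write(gt->uncore, reg, val); /* explicit uncore */
        mutex_unlock(&gt->ggtt->vm.mutex);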

Cc: Ding Zhuocheng <zhuocheng.ding@intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Acked-by: Zhenyu Wang <zhenyuw@linux.intel.com>
Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20200304032307.2983-3-zhenyuw@linux.intel.com
parent 8fde4107
@@ -41,7 +41,7 @@
 static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm)
 {
         struct intel_gvt *gvt = vgpu->gvt;
-        struct drm_i915_private *dev_priv = gvt->dev_priv;
+        struct intel_gt *gt = gvt->gt;
         unsigned int flags;
         u64 start, end, size;
         struct drm_mm_node *node;
@@ -61,14 +61,14 @@ static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm)
                 flags = PIN_MAPPABLE;
         }

-        mutex_lock(&dev_priv->ggtt.vm.mutex);
-        mmio_hw_access_pre(dev_priv);
-        ret = i915_gem_gtt_insert(&dev_priv->ggtt.vm, node,
+        mutex_lock(&gt->ggtt->vm.mutex);
+        mmio_hw_access_pre(gt);
+        ret = i915_gem_gtt_insert(&gt->ggtt->vm, node,
                                   size, I915_GTT_PAGE_SIZE,
                                   I915_COLOR_UNEVICTABLE,
                                   start, end, flags);
-        mmio_hw_access_post(dev_priv);
-        mutex_unlock(&dev_priv->ggtt.vm.mutex);
+        mmio_hw_access_post(gt);
+        mutex_unlock(&gt->ggtt->vm.mutex);
         if (ret)
                 gvt_err("fail to alloc %s gm space from host\n",
                         high_gm ? "high" : "low");
@@ -79,7 +79,7 @@ static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm)
 static int alloc_vgpu_gm(struct intel_vgpu *vgpu)
 {
         struct intel_gvt *gvt = vgpu->gvt;
-        struct drm_i915_private *dev_priv = gvt->dev_priv;
+        struct intel_gt *gt = gvt->gt;
         int ret;

         ret = alloc_gm(vgpu, false);
@@ -98,20 +98,21 @@ static int alloc_vgpu_gm(struct intel_vgpu *vgpu)
         return 0;

 out_free_aperture:
-        mutex_lock(&dev_priv->ggtt.vm.mutex);
+        mutex_lock(&gt->ggtt->vm.mutex);
         drm_mm_remove_node(&vgpu->gm.low_gm_node);
-        mutex_unlock(&dev_priv->ggtt.vm.mutex);
+        mutex_unlock(&gt->ggtt->vm.mutex);
         return ret;
 }

 static void free_vgpu_gm(struct intel_vgpu *vgpu)
 {
-        struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+        struct intel_gvt *gvt = vgpu->gvt;
+        struct intel_gt *gt = gvt->gt;

-        mutex_lock(&dev_priv->ggtt.vm.mutex);
+        mutex_lock(&gt->ggtt->vm.mutex);
         drm_mm_remove_node(&vgpu->gm.low_gm_node);
         drm_mm_remove_node(&vgpu->gm.high_gm_node);
-        mutex_unlock(&dev_priv->ggtt.vm.mutex);
+        mutex_unlock(&gt->ggtt->vm.mutex);
 }

 /**
@@ -128,28 +129,29 @@ void intel_vgpu_write_fence(struct intel_vgpu *vgpu,
                 u32 fence, u64 value)
 {
         struct intel_gvt *gvt = vgpu->gvt;
-        struct drm_i915_private *dev_priv = gvt->dev_priv;
+        struct drm_i915_private *i915 = gvt->gt->i915;
+        struct intel_uncore *uncore = gvt->gt->uncore;
         struct i915_fence_reg *reg;
         i915_reg_t fence_reg_lo, fence_reg_hi;

-        assert_rpm_wakelock_held(&dev_priv->runtime_pm);
+        assert_rpm_wakelock_held(uncore->rpm);

-        if (drm_WARN_ON(&dev_priv->drm, fence >= vgpu_fence_sz(vgpu)))
+        if (drm_WARN_ON(&i915->drm, fence >= vgpu_fence_sz(vgpu)))
                 return;

         reg = vgpu->fence.regs[fence];
-        if (drm_WARN_ON(&dev_priv->drm, !reg))
+        if (drm_WARN_ON(&i915->drm, !reg))
                 return;

         fence_reg_lo = FENCE_REG_GEN6_LO(reg->id);
         fence_reg_hi = FENCE_REG_GEN6_HI(reg->id);

-        I915_WRITE(fence_reg_lo, 0);
-        POSTING_READ(fence_reg_lo);
-        I915_WRITE(fence_reg_hi, upper_32_bits(value));
-        I915_WRITE(fence_reg_lo, lower_32_bits(value));
-        POSTING_READ(fence_reg_lo);
+        intel_uncore_write(uncore, fence_reg_lo, 0);
+        intel_uncore_posting_read(uncore, fence_reg_lo);
+        intel_uncore_write(uncore, fence_reg_hi, upper_32_bits(value));
+        intel_uncore_write(uncore, fence_reg_lo, lower_32_bits(value));
+        intel_uncore_posting_read(uncore, fence_reg_lo);
 }

 static void _clear_vgpu_fence(struct intel_vgpu *vgpu)
@@ -163,42 +165,43 @@ static void _clear_vgpu_fence(struct intel_vgpu *vgpu)
 static void free_vgpu_fence(struct intel_vgpu *vgpu)
 {
         struct intel_gvt *gvt = vgpu->gvt;
-        struct drm_i915_private *dev_priv = gvt->dev_priv;
+        struct intel_uncore *uncore = gvt->gt->uncore;
         struct i915_fence_reg *reg;
+        intel_wakeref_t wakeref;
         u32 i;

-        if (drm_WARN_ON(&dev_priv->drm, !vgpu_fence_sz(vgpu)))
+        if (drm_WARN_ON(&gvt->gt->i915->drm, !vgpu_fence_sz(vgpu)))
                 return;

-        intel_runtime_pm_get(&dev_priv->runtime_pm);
+        wakeref = intel_runtime_pm_get(uncore->rpm);

-        mutex_lock(&dev_priv->ggtt.vm.mutex);
+        mutex_lock(&gvt->gt->ggtt->vm.mutex);
         _clear_vgpu_fence(vgpu);
         for (i = 0; i < vgpu_fence_sz(vgpu); i++) {
                 reg = vgpu->fence.regs[i];
                 i915_unreserve_fence(reg);
                 vgpu->fence.regs[i] = NULL;
         }
-        mutex_unlock(&dev_priv->ggtt.vm.mutex);
+        mutex_unlock(&gvt->gt->ggtt->vm.mutex);

-        intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
+        intel_runtime_pm_put(uncore->rpm, wakeref);
 }

 static int alloc_vgpu_fence(struct intel_vgpu *vgpu)
 {
         struct intel_gvt *gvt = vgpu->gvt;
-        struct drm_i915_private *dev_priv = gvt->dev_priv;
-        struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
+        struct intel_uncore *uncore = gvt->gt->uncore;
         struct i915_fence_reg *reg;
+        intel_wakeref_t wakeref;
         int i;

-        intel_runtime_pm_get(rpm);
+        wakeref = intel_runtime_pm_get(uncore->rpm);

         /* Request fences from host */
-        mutex_lock(&dev_priv->ggtt.vm.mutex);
+        mutex_lock(&gvt->gt->ggtt->vm.mutex);

         for (i = 0; i < vgpu_fence_sz(vgpu); i++) {
-                reg = i915_reserve_fence(&dev_priv->ggtt);
+                reg = i915_reserve_fence(gvt->gt->ggtt);
                 if (IS_ERR(reg))
                         goto out_free_fence;
@@ -207,9 +210,10 @@ static int alloc_vgpu_fence(struct intel_vgpu *vgpu)
         _clear_vgpu_fence(vgpu);

-        mutex_unlock(&dev_priv->ggtt.vm.mutex);
-        intel_runtime_pm_put_unchecked(rpm);
+        mutex_unlock(&gvt->gt->ggtt->vm.mutex);
+        intel_runtime_pm_put(uncore->rpm, wakeref);
         return 0;

 out_free_fence:
         gvt_vgpu_err("Failed to alloc fences\n");
         /* Return fences to host, if fail */
@@ -220,8 +224,8 @@ static int alloc_vgpu_fence(struct intel_vgpu *vgpu)
                 i915_unreserve_fence(reg);
                 vgpu->fence.regs[i] = NULL;
         }
-        mutex_unlock(&dev_priv->ggtt.vm.mutex);
-        intel_runtime_pm_put_unchecked(rpm);
+        mutex_unlock(&gvt->gt->ggtt->vm.mutex);
+        intel_runtime_pm_put_unchecked(uncore->rpm);
         return -ENOSPC;
 }
@@ -315,11 +319,11 @@ void intel_vgpu_free_resource(struct intel_vgpu *vgpu)
  */
 void intel_vgpu_reset_resource(struct intel_vgpu *vgpu)
 {
-        struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+        struct intel_gvt *gvt = vgpu->gvt;
+        intel_wakeref_t wakeref;

-        intel_runtime_pm_get(&dev_priv->runtime_pm);
-        _clear_vgpu_fence(vgpu);
-        intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
+        with_intel_runtime_pm(gvt->gt->uncore->rpm, wakeref)
+                _clear_vgpu_fence(vgpu);
 }

 /**
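A side note on the runtime-PM changes folded into the fence helpers above: the old unchecked get/put pair becomes a tracked wakeref, so the put verifies it releases exactly the reference the get returned. Both idioms used in this file, in sketch form (kernel-internal APIs, shown out of context):

        intel_wakeref_t wakeref;

        /* tracked pair, as in alloc_vgpu_fence()/free_vgpu_fence() */
        wakeref = intel_runtime_pm_get(uncore->rpm);
        /* ... poke fence registers ... */
        intel_runtime_pm_put(uncore->rpm, wakeref);

        /* scoped form, as in intel_vgpu_reset_resource() */
        with_intel_runtime_pm(gvt->gt->uncore->rpm, wakeref)
                _clear_vgpu_fence(vgpu);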
......
@@ -106,7 +106,7 @@ static void vgpu_pci_cfg_mem_write(struct intel_vgpu *vgpu, unsigned int off,
 int intel_vgpu_emulate_cfg_read(struct intel_vgpu *vgpu, unsigned int offset,
         void *p_data, unsigned int bytes)
 {
-        struct drm_i915_private *i915 = vgpu->gvt->dev_priv;
+        struct drm_i915_private *i915 = vgpu->gvt->gt->i915;

         if (drm_WARN_ON(&i915->drm, bytes > 4))
                 return -EINVAL;
@@ -300,7 +300,7 @@ static int emulate_pci_bar_write(struct intel_vgpu *vgpu, unsigned int offset,
 int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
         void *p_data, unsigned int bytes)
 {
-        struct drm_i915_private *i915 = vgpu->gvt->dev_priv;
+        struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
         int ret;

         if (drm_WARN_ON(&i915->drm, bytes > 4))
@@ -396,9 +396,9 @@ void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu,
         memset(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_OPREGION, 0, 4);

         vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].size =
-                pci_resource_len(gvt->dev_priv->drm.pdev, 0);
+                pci_resource_len(gvt->gt->i915->drm.pdev, 0);
         vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].size =
-                pci_resource_len(gvt->dev_priv->drm.pdev, 2);
+                pci_resource_len(gvt->gt->i915->drm.pdev, 2);

         memset(vgpu_cfg_space(vgpu) + PCI_ROM_ADDRESS, 0, 4);
 }
......
@@ -58,12 +58,11 @@ static int mmio_offset_compare(void *priv,
 static inline int mmio_diff_handler(struct intel_gvt *gvt,
                                     u32 offset, void *data)
 {
-        struct drm_i915_private *i915 = gvt->dev_priv;
         struct mmio_diff_param *param = data;
         struct diff_mmio *node;
         u32 preg, vreg;

-        preg = intel_uncore_read_notrace(&i915->uncore, _MMIO(offset));
+        preg = intel_uncore_read_notrace(gvt->gt->uncore, _MMIO(offset));
         vreg = vgpu_vreg(param->vgpu, offset);

         if (preg != vreg) {
@@ -98,10 +97,10 @@ static int vgpu_mmio_diff_show(struct seq_file *s, void *unused)
         mutex_lock(&gvt->lock);
         spin_lock_bh(&gvt->scheduler.mmio_context_lock);

-        mmio_hw_access_pre(gvt->dev_priv);
+        mmio_hw_access_pre(gvt->gt);
         /* Recognize all the diff mmios to list. */
         intel_gvt_for_each_tracked_mmio(gvt, mmio_diff_handler, &param);
-        mmio_hw_access_post(gvt->dev_priv);
+        mmio_hw_access_post(gvt->gt);

         spin_unlock_bh(&gvt->scheduler.mmio_context_lock);
         mutex_unlock(&gvt->lock);
@@ -186,7 +185,7 @@ void intel_gvt_debugfs_remove_vgpu(struct intel_vgpu *vgpu)
  */
 void intel_gvt_debugfs_init(struct intel_gvt *gvt)
 {
-        struct drm_minor *minor = gvt->dev_priv->drm.primary;
+        struct drm_minor *minor = gvt->gt->i915->drm.primary;

         gvt->debugfs_root = debugfs_create_dir("gvt", minor->debugfs_root);
......
@@ -57,7 +57,7 @@ static int get_edp_pipe(struct intel_vgpu *vgpu)
 static int edp_pipe_is_enabled(struct intel_vgpu *vgpu)
 {
-        struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+        struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;

         if (!(vgpu_vreg_t(vgpu, PIPECONF(_PIPE_EDP)) & PIPECONF_ENABLE))
                 return 0;
@@ -69,7 +69,7 @@ static int edp_pipe_is_enabled(struct intel_vgpu *vgpu)
 int pipe_is_enabled(struct intel_vgpu *vgpu, int pipe)
 {
-        struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+        struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;

         if (drm_WARN_ON(&dev_priv->drm,
                         pipe < PIPE_A || pipe >= I915_MAX_PIPES))
@@ -169,7 +169,7 @@ static u8 dpcd_fix_data[DPCD_HEADER_SIZE] = {
 static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
 {
-        struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+        struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
         int pipe;

         if (IS_BROXTON(dev_priv)) {
@@ -320,7 +320,7 @@ static void clean_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num)
 static int setup_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num,
                 int type, unsigned int resolution)
 {
-        struct drm_i915_private *i915 = vgpu->gvt->dev_priv;
+        struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
         struct intel_vgpu_port *port = intel_vgpu_port(vgpu, port_num);

         if (drm_WARN_ON(&i915->drm, resolution >= GVT_EDID_NUM))
@@ -391,7 +391,7 @@ void intel_gvt_check_vblank_emulation(struct intel_gvt *gvt)
 static void emulate_vblank_on_pipe(struct intel_vgpu *vgpu, int pipe)
 {
-        struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+        struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
         struct intel_vgpu_irq *irq = &vgpu->irq;
         int vblank_event[] = {
                 [PIPE_A] = PIPE_A_VBLANK,
@@ -423,7 +423,7 @@ static void emulate_vblank(struct intel_vgpu *vgpu)
         int pipe;

         mutex_lock(&vgpu->vgpu_lock);
-        for_each_pipe(vgpu->gvt->dev_priv, pipe)
+        for_each_pipe(vgpu->gvt->gt->i915, pipe)
                 emulate_vblank_on_pipe(vgpu, pipe);
         mutex_unlock(&vgpu->vgpu_lock);
 }
@@ -456,11 +456,11 @@ void intel_gvt_emulate_vblank(struct intel_gvt *gvt)
  */
 void intel_vgpu_emulate_hotplug(struct intel_vgpu *vgpu, bool connected)
 {
-        struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+        struct drm_i915_private *i915 = vgpu->gvt->gt->i915;

         /* TODO: add more platforms support */
-        if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) ||
-            IS_COFFEELAKE(dev_priv)) {
+        if (IS_SKYLAKE(i915) || IS_KABYLAKE(i915) ||
+            IS_COFFEELAKE(i915)) {
                 if (connected) {
                         vgpu_vreg_t(vgpu, SFUSE_STRAP) |=
                                 SFUSE_STRAP_DDID_DETECTED;
@@ -486,7 +486,7 @@ void intel_vgpu_emulate_hotplug(struct intel_vgpu *vgpu, bool connected)
  */
 void intel_vgpu_clean_display(struct intel_vgpu *vgpu)
 {
-        struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+        struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;

         if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) ||
             IS_COFFEELAKE(dev_priv))
@@ -508,7 +508,7 @@ void intel_vgpu_clean_display(struct intel_vgpu *vgpu)
  */
 int intel_vgpu_init_display(struct intel_vgpu *vgpu, u64 resolution)
 {
-        struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+        struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;

         intel_vgpu_init_i2c_edid(vgpu);
......
@@ -417,7 +417,7 @@ static void update_fb_info(struct vfio_device_gfx_plane_info *gvt_dmabuf,
 int intel_vgpu_query_plane(struct intel_vgpu *vgpu, void *args)
 {
-        struct drm_device *dev = &vgpu->gvt->dev_priv->drm;
+        struct drm_device *dev = &vgpu->gvt->gt->i915->drm;
         struct vfio_device_gfx_plane_info *gfx_plane_info = args;
         struct intel_vgpu_dmabuf_obj *dmabuf_obj;
         struct intel_vgpu_fb_info fb_info;
@@ -523,7 +523,7 @@ int intel_vgpu_query_plane(struct intel_vgpu *vgpu, void *args)
 /* To associate an exposed dmabuf with the dmabuf_obj */
 int intel_vgpu_get_dmabuf(struct intel_vgpu *vgpu, unsigned int dmabuf_id)
 {
-        struct drm_device *dev = &vgpu->gvt->dev_priv->drm;
+        struct drm_device *dev = &vgpu->gvt->gt->i915->drm;
         struct intel_vgpu_dmabuf_obj *dmabuf_obj;
         struct drm_i915_gem_object *obj;
         struct dma_buf *dmabuf;
......
@@ -135,7 +135,7 @@ static void reset_gmbus_controller(struct intel_vgpu *vgpu)
 static int gmbus0_mmio_write(struct intel_vgpu *vgpu,
                         unsigned int offset, void *p_data, unsigned int bytes)
 {
-        struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+        struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
         int port, pin_select;

         memcpy(&vgpu_vreg(vgpu, offset), p_data, bytes);
@@ -147,13 +147,13 @@ static int gmbus0_mmio_write(struct intel_vgpu *vgpu,
         if (pin_select == 0)
                 return 0;

-        if (IS_BROXTON(dev_priv))
+        if (IS_BROXTON(i915))
                 port = bxt_get_port_from_gmbus0(pin_select);
-        else if (IS_COFFEELAKE(dev_priv))
+        else if (IS_COFFEELAKE(i915))
                 port = cnp_get_port_from_gmbus0(pin_select);
         else
                 port = get_port_from_gmbus0(pin_select);
-        if (drm_WARN_ON(&dev_priv->drm, port < 0))
+        if (drm_WARN_ON(&i915->drm, port < 0))
                 return 0;

         vgpu->display.i2c_edid.state = I2C_GMBUS;
@@ -276,7 +276,7 @@ static int gmbus1_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
 static int gmbus3_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
         void *p_data, unsigned int bytes)
 {
-        struct drm_i915_private *i915 = vgpu->gvt->dev_priv;
+        struct drm_i915_private *i915 = vgpu->gvt->gt->i915;

         drm_WARN_ON(&i915->drm, 1);
         return 0;
@@ -373,7 +373,7 @@ static int gmbus2_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
 int intel_gvt_i2c_handle_gmbus_read(struct intel_vgpu *vgpu,
         unsigned int offset, void *p_data, unsigned int bytes)
 {
-        struct drm_i915_private *i915 = vgpu->gvt->dev_priv;
+        struct drm_i915_private *i915 = vgpu->gvt->gt->i915;

         if (drm_WARN_ON(&i915->drm, bytes > 8 && (offset & (bytes - 1))))
                 return -EINVAL;
@@ -403,7 +403,7 @@ int intel_gvt_i2c_handle_gmbus_read(struct intel_vgpu *vgpu,
 int intel_gvt_i2c_handle_gmbus_write(struct intel_vgpu *vgpu,
         unsigned int offset, void *p_data, unsigned int bytes)
 {
-        struct drm_i915_private *i915 = vgpu->gvt->dev_priv;
+        struct drm_i915_private *i915 = vgpu->gvt->gt->i915;

         if (drm_WARN_ON(&i915->drm, bytes > 8 && (offset & (bytes - 1))))
                 return -EINVAL;
@@ -479,7 +479,7 @@ void intel_gvt_i2c_handle_aux_ch_write(struct intel_vgpu *vgpu,
                                 unsigned int offset,
                                 void *p_data)
 {
-        struct drm_i915_private *i915 = vgpu->gvt->dev_priv;
+        struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
         struct intel_vgpu_i2c_edid *i2c_edid = &vgpu->display.i2c_edid;
         int msg_length, ret_msg_size;
         int msg, addr, ctrl, op;
......
@@ -524,7 +524,7 @@ static void init_vgpu_execlist(struct intel_vgpu *vgpu,
 static void clean_execlist(struct intel_vgpu *vgpu,
                            intel_engine_mask_t engine_mask)
 {
-        struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+        struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
         struct intel_engine_cs *engine;
         struct intel_vgpu_submission *s = &vgpu->submission;
         intel_engine_mask_t tmp;
@@ -539,7 +539,7 @@ static void clean_execlist(struct intel_vgpu *vgpu,
 static void reset_execlist(struct intel_vgpu *vgpu,
                            intel_engine_mask_t engine_mask)
 {
-        struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+        struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
         struct intel_engine_cs *engine;
         intel_engine_mask_t tmp;
......
@@ -146,7 +146,7 @@ static int skl_format_to_drm(int format, bool rgb_order, bool alpha,
 static u32 intel_vgpu_get_stride(struct intel_vgpu *vgpu, int pipe,
         u32 tiled, int stride_mask, int bpp)
 {
-        struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+        struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;

         u32 stride_reg = vgpu_vreg_t(vgpu, DSPSTRIDE(pipe)) & stride_mask;
         u32 stride = stride_reg;
@@ -202,8 +202,8 @@ static int get_active_pipe(struct intel_vgpu *vgpu)
 int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
         struct intel_vgpu_primary_plane_format *plane)
 {
+        struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
         u32 val, fmt;
-        struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
         int pipe;

         pipe = get_active_pipe(vgpu);
@@ -332,9 +332,9 @@ static int cursor_mode_to_drm(int mode)
 int intel_vgpu_decode_cursor_plane(struct intel_vgpu *vgpu,
         struct intel_vgpu_cursor_plane_format *plane)
 {
+        struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
         u32 val, mode, index;
         u32 alpha_plane, alpha_force;
-        struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
         int pipe;

         pipe = get_active_pipe(vgpu);
......
@@ -68,9 +68,7 @@ static struct bin_attribute firmware_attr = {
 static int mmio_snapshot_handler(struct intel_gvt *gvt, u32 offset, void *data)
 {
-        struct drm_i915_private *i915 = gvt->dev_priv;
-
-        *(u32 *)(data + offset) = intel_uncore_read_notrace(&i915->uncore,
+        *(u32 *)(data + offset) = intel_uncore_read_notrace(gvt->gt->uncore,
                                                             _MMIO(offset));
         return 0;
 }
@@ -78,7 +76,7 @@ static int mmio_snapshot_handler(struct intel_gvt *gvt, u32 offset, void *data)
 static int expose_firmware_sysfs(struct intel_gvt *gvt)
 {
         struct intel_gvt_device_info *info = &gvt->device_info;
-        struct pci_dev *pdev = gvt->dev_priv->drm.pdev;
+        struct pci_dev *pdev = gvt->gt->i915->drm.pdev;
         struct gvt_firmware_header *h;
         void *firmware;
         void *p;
@@ -129,7 +127,7 @@ static int expose_firmware_sysfs(struct intel_gvt *gvt)
 static void clean_firmware_sysfs(struct intel_gvt *gvt)
 {
-        struct pci_dev *pdev = gvt->dev_priv->drm.pdev;
+        struct pci_dev *pdev = gvt->gt->i915->drm.pdev;

         device_remove_bin_file(&pdev->dev, &firmware_attr);
         vfree(firmware_attr.private);
@@ -153,8 +151,7 @@ static int verify_firmware(struct intel_gvt *gvt,
                            const struct firmware *fw)
 {
         struct intel_gvt_device_info *info = &gvt->device_info;
-        struct drm_i915_private *dev_priv = gvt->dev_priv;
-        struct pci_dev *pdev = dev_priv->drm.pdev;
+        struct pci_dev *pdev = gvt->gt->i915->drm.pdev;
         struct gvt_firmware_header *h;
         unsigned long id, crc32_start;
         const void *mem;
@@ -208,8 +205,7 @@ static int verify_firmware(struct intel_gvt *gvt,
 int intel_gvt_load_firmware(struct intel_gvt *gvt)
 {
         struct intel_gvt_device_info *info = &gvt->device_info;
-        struct drm_i915_private *dev_priv = gvt->dev_priv;
-        struct pci_dev *pdev = dev_priv->drm.pdev;
+        struct pci_dev *pdev = gvt->gt->i915->drm.pdev;
         struct intel_gvt_firmware *firmware = &gvt->firmware;
         struct gvt_firmware_header *h;
         const struct firmware *fw;
@@ -244,7 +240,7 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt)
         gvt_dbg_core("request hw state firmware %s...\n", path);

-        ret = request_firmware(&fw, path, &dev_priv->drm.pdev->dev);
+        ret = request_firmware(&fw, path, &gvt->gt->i915->drm.pdev->dev);
         kfree(path);

         if (ret)
......
@@ -71,7 +71,7 @@ bool intel_gvt_ggtt_validate_range(struct intel_vgpu *vgpu, u64 addr, u32 size)
 /* translate a guest gmadr to host gmadr */
 int intel_gvt_ggtt_gmadr_g2h(struct intel_vgpu *vgpu, u64 g_addr, u64 *h_addr)
 {
-        struct drm_i915_private *i915 = vgpu->gvt->dev_priv;
+        struct drm_i915_private *i915 = vgpu->gvt->gt->i915;

         if (drm_WARN(&i915->drm, !vgpu_gmadr_is_valid(vgpu, g_addr),
                      "invalid guest gmadr %llx\n", g_addr))
@@ -89,7 +89,7 @@ int intel_gvt_ggtt_gmadr_g2h(struct intel_vgpu *vgpu, u64 g_addr, u64 *h_addr)
 /* translate a host gmadr to guest gmadr */
 int intel_gvt_ggtt_gmadr_h2g(struct intel_vgpu *vgpu, u64 h_addr, u64 *g_addr)
 {
-        struct drm_i915_private *i915 = vgpu->gvt->dev_priv;
+        struct drm_i915_private *i915 = vgpu->gvt->gt->i915;

         if (drm_WARN(&i915->drm, !gvt_gmadr_is_valid(vgpu->gvt, h_addr),
                      "invalid host gmadr %llx\n", h_addr))
@@ -279,24 +279,23 @@ static inline int get_pse_type(int type)
         return gtt_type_table[type].pse_entry_type;
 }

-static u64 read_pte64(struct drm_i915_private *dev_priv, unsigned long index)
+static u64 read_pte64(struct i915_ggtt *ggtt, unsigned long index)
 {
-        void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index;
+        void __iomem *addr = (gen8_pte_t __iomem *)ggtt->gsm + index;

         return readq(addr);
 }

-static void ggtt_invalidate(struct drm_i915_private *dev_priv)
+static void ggtt_invalidate(struct intel_gt *gt)
 {
-        mmio_hw_access_pre(dev_priv);
-        I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
-        mmio_hw_access_post(dev_priv);
+        mmio_hw_access_pre(gt);
+        intel_uncore_write(gt->uncore, GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
+        mmio_hw_access_post(gt);
 }

-static void write_pte64(struct drm_i915_private *dev_priv,
-                        unsigned long index, u64 pte)
+static void write_pte64(struct i915_ggtt *ggtt, unsigned long index, u64 pte)
 {
-        void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index;
+        void __iomem *addr = (gen8_pte_t __iomem *)ggtt->gsm + index;

         writeq(pte, addr);
 }
@@ -319,7 +318,7 @@ static inline int gtt_get_entry64(void *pt,
                 if (WARN_ON(ret))
                         return ret;
         } else if (!pt) {
-                e->val64 = read_pte64(vgpu->gvt->dev_priv, index);
+                e->val64 = read_pte64(vgpu->gvt->gt->ggtt, index);
         } else {
                 e->val64 = *((u64 *)pt + index);
         }
@@ -344,7 +343,7 @@ static inline int gtt_set_entry64(void *pt,
                 if (WARN_ON(ret))
                         return ret;
         } else if (!pt) {
-                write_pte64(vgpu->gvt->dev_priv, index, e->val64);
+                write_pte64(vgpu->gvt->gt->ggtt, index, e->val64);
         } else {
                 *((u64 *)pt + index) = e->val64;
         }
@@ -738,7 +737,7 @@ static int detach_oos_page(struct intel_vgpu *vgpu,
 static void ppgtt_free_spt(struct intel_vgpu_ppgtt_spt *spt)
 {
-        struct device *kdev = &spt->vgpu->gvt->dev_priv->drm.pdev->dev;
+        struct device *kdev = &spt->vgpu->gvt->gt->i915->drm.pdev->dev;

         trace_spt_free(spt->vgpu->id, spt, spt->guest_page.type);
@@ -823,7 +822,7 @@ static int reclaim_one_ppgtt_mm(struct intel_gvt *gvt);
 static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_spt(
                 struct intel_vgpu *vgpu, enum intel_gvt_gtt_type type)
 {
-        struct device *kdev = &vgpu->gvt->dev_priv->drm.pdev->dev;
+        struct device *kdev = &vgpu->gvt->gt->i915->drm.pdev->dev;
         struct intel_vgpu_ppgtt_spt *spt = NULL;
         dma_addr_t daddr;
         int ret;
@@ -944,7 +943,7 @@ static int ppgtt_invalidate_spt(struct intel_vgpu_ppgtt_spt *spt);
 static int ppgtt_invalidate_spt_by_shadow_entry(struct intel_vgpu *vgpu,
                 struct intel_gvt_gtt_entry *e)
 {
-        struct drm_i915_private *i915 = vgpu->gvt->dev_priv;
+        struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
         struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
         struct intel_vgpu_ppgtt_spt *s;
         enum intel_gvt_gtt_type cur_pt_type;
@@ -1051,7 +1050,7 @@ static int ppgtt_invalidate_spt(struct intel_vgpu_ppgtt_spt *spt)
 static bool vgpu_ips_enabled(struct intel_vgpu *vgpu)
 {
-        struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+        struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;

         if (INTEL_GEN(dev_priv) == 9 || INTEL_GEN(dev_priv) == 10) {
                 u32 ips = vgpu_vreg_t(vgpu, GEN8_GAMW_ECO_DEV_RW_IA) &
@@ -1160,7 +1159,7 @@ static int is_2MB_gtt_possible(struct intel_vgpu *vgpu,
         struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
         unsigned long pfn;

-        if (!HAS_PAGE_SIZES(vgpu->gvt->dev_priv, I915_GTT_PAGE_SIZE_2M))
+        if (!HAS_PAGE_SIZES(vgpu->gvt->gt->i915, I915_GTT_PAGE_SIZE_2M))
                 return 0;

         pfn = intel_gvt_hypervisor_gfn_to_mfn(vgpu, ops->get_pfn(entry));
@@ -2317,7 +2316,7 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
                 ggtt_invalidate_pte(vgpu, &e);

         ggtt_set_host_entry(ggtt_mm, &m, g_gtt_index);
-        ggtt_invalidate(gvt->dev_priv);
+        ggtt_invalidate(gvt->gt);
         return 0;
 }
@@ -2350,14 +2349,14 @@ int intel_vgpu_emulate_ggtt_mmio_write(struct intel_vgpu *vgpu,
 static int alloc_scratch_pages(struct intel_vgpu *vgpu,
                 enum intel_gvt_gtt_type type)
 {
-        struct drm_i915_private *i915 = vgpu->gvt->dev_priv;
+        struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
         struct intel_vgpu_gtt *gtt = &vgpu->gtt;
         struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
         int page_entry_num = I915_GTT_PAGE_SIZE >>
                                 vgpu->gvt->device_info.gtt_entry_size_shift;
         void *scratch_pt;
         int i;
-        struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
+        struct device *dev = &vgpu->gvt->gt->i915->drm.pdev->dev;
         dma_addr_t daddr;

         if (drm_WARN_ON(&i915->drm,
@@ -2415,7 +2414,7 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu,
 static int release_scratch_page_tree(struct intel_vgpu *vgpu)
 {
         int i;
-        struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
+        struct device *dev = &vgpu->gvt->gt->i915->drm.pdev->dev;
         dma_addr_t daddr;

         for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) {
@@ -2687,7 +2686,7 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
 {
         int ret;
         void *page;
-        struct device *dev = &gvt->dev_priv->drm.pdev->dev;
+        struct device *dev = &gvt->gt->i915->drm.pdev->dev;
         dma_addr_t daddr;

         gvt_dbg_core("init gtt\n");
@@ -2736,7 +2735,7 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
  */
 void intel_gvt_clean_gtt(struct intel_gvt *gvt)
 {
-        struct device *dev = &gvt->dev_priv->drm.pdev->dev;
+        struct device *dev = &gvt->gt->i915->drm.pdev->dev;
         dma_addr_t daddr = (dma_addr_t)(gvt->gtt.scratch_mfn <<
                                         I915_GTT_PAGE_SHIFT);
@@ -2784,7 +2783,6 @@ void intel_vgpu_invalidate_ppgtt(struct intel_vgpu *vgpu)
 void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu, bool invalidate_old)
 {
         struct intel_gvt *gvt = vgpu->gvt;
-        struct drm_i915_private *dev_priv = gvt->dev_priv;
         struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops;
         struct intel_gvt_gtt_entry entry = {.type = GTT_TYPE_GGTT_PTE};
         struct intel_gvt_gtt_entry old_entry;
@@ -2814,7 +2812,7 @@ void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu, bool invalidate_old)
                 ggtt_set_host_entry(vgpu->gtt.ggtt_mm, &entry, index++);
         }

-        ggtt_invalidate(dev_priv);
+        ggtt_invalidate(gvt->gt);
 }

 /**
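The reworked PTE helpers above make the two hardware dependencies explicit: the i915_ggtt that owns the gsm mapping, and the intel_gt whose uncore performs the flush. A sketch of how a host GGTT entry update is bracketed, following emulate_ggtt_mmio_write() (example_set_host_pte is a hypothetical name, not a function in the patch):

        static void example_set_host_pte(struct intel_vgpu *vgpu,
                                         unsigned long index, u64 pte)
        {
                struct intel_gt *gt = vgpu->gvt->gt;

                write_pte64(gt->ggtt, index, pte);  /* writeq into ggtt->gsm */
                ggtt_invalidate(gt);                /* GFX_FLSH_CNTL_GEN6 flush */
        }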
......
@@ -49,15 +49,15 @@ static const char * const supported_hypervisors[] = {
 static struct intel_vgpu_type *intel_gvt_find_vgpu_type(struct intel_gvt *gvt,
                 const char *name)
 {
+        const char *driver_name =
+                dev_driver_string(&gvt->gt->i915->drm.pdev->dev);
         int i;
-        struct intel_vgpu_type *t;
-        const char *driver_name = dev_driver_string(
-                &gvt->dev_priv->drm.pdev->dev);

+        name += strlen(driver_name) + 1;
         for (i = 0; i < gvt->num_types; i++) {
-                t = &gvt->types[i];
-                if (!strncmp(t->name, name + strlen(driver_name) + 1,
-                        sizeof(t->name)))
+                struct intel_vgpu_type *t = &gvt->types[i];
+
+                if (!strncmp(t->name, name, sizeof(t->name)))
                         return t;
         }
@@ -189,7 +189,7 @@ static const struct intel_gvt_ops intel_gvt_ops = {
 static void init_device_info(struct intel_gvt *gvt)
 {
         struct intel_gvt_device_info *info = &gvt->device_info;
-        struct pci_dev *pdev = gvt->dev_priv->drm.pdev;
+        struct pci_dev *pdev = gvt->gt->i915->drm.pdev;

         info->max_support_vgpus = 8;
         info->cfg_space_size = PCI_CFG_SPACE_EXP_SIZE;
@@ -255,17 +255,17 @@ static int init_service_thread(struct intel_gvt *gvt)
 /**
  * intel_gvt_clean_device - clean a GVT device
- * @dev_priv: i915 private
+ * @i915: i915 private
  *
  * This function is called at the driver unloading stage, to free the
  * resources owned by a GVT device.
  *
  */
-void intel_gvt_clean_device(struct drm_i915_private *dev_priv)
+void intel_gvt_clean_device(struct drm_i915_private *i915)
 {
-        struct intel_gvt *gvt = to_gvt(dev_priv);
+        struct intel_gvt *gvt = fetch_and_zero(&i915->gvt);

-        if (drm_WARN_ON(&dev_priv->drm, !gvt))
+        if (drm_WARN_ON(&i915->drm, !gvt))
                 return;

         intel_gvt_destroy_idle_vgpu(gvt->idle_vgpu);
@@ -283,13 +283,12 @@ void intel_gvt_clean_device(struct drm_i915_private *dev_priv)
         intel_gvt_clean_mmio_info(gvt);
         idr_destroy(&gvt->vgpu_idr);

-        kfree(dev_priv->gvt);
-        dev_priv->gvt = NULL;
+        kfree(i915->gvt);
 }

 /**
  * intel_gvt_init_device - initialize a GVT device
- * @dev_priv: drm i915 private data
+ * @i915: drm i915 private data
  *
  * This function is called at the initialization stage, to initialize
  * necessary GVT components.
@@ -298,13 +297,13 @@ void intel_gvt_clean_device(struct drm_i915_private *dev_priv)
  * Zero on success, negative error code if failed.
  *
  */
-int intel_gvt_init_device(struct drm_i915_private *dev_priv)
+int intel_gvt_init_device(struct drm_i915_private *i915)
 {
         struct intel_gvt *gvt;
         struct intel_vgpu *vgpu;
         int ret;

-        if (drm_WARN_ON(&dev_priv->drm, dev_priv->gvt))
+        if (drm_WARN_ON(&i915->drm, i915->gvt))
                 return -EEXIST;

         gvt = kzalloc(sizeof(struct intel_gvt), GFP_KERNEL);
@@ -317,8 +316,8 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
         spin_lock_init(&gvt->scheduler.mmio_context_lock);
         mutex_init(&gvt->lock);
         mutex_init(&gvt->sched_lock);
-        gvt->dev_priv = dev_priv;
-        dev_priv->gvt = gvt;
+        gvt->gt = &i915->gt;
+        i915->gvt = gvt;

         init_device_info(gvt);
@@ -377,7 +376,7 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
         intel_gvt_debugfs_init(gvt);

         gvt_dbg_core("gvt device initialization is done\n");
-        intel_gvt_host.dev = &dev_priv->drm.pdev->dev;
+        intel_gvt_host.dev = &i915->drm.pdev->dev;
         intel_gvt_host.initialized = true;
         return 0;
@@ -402,7 +401,7 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
 out_clean_idr:
         idr_destroy(&gvt->vgpu_idr);
         kfree(gvt);
-        dev_priv->gvt = NULL;
+        i915->gvt = NULL;
         return ret;
 }
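The teardown above also swaps to_gvt(dev_priv) for fetch_and_zero(&i915->gvt), which reads the pointer and clears the field in a single expression, so later code cannot reach the gvt state through i915 once cleanup has begun. The helper lives in i915_utils.h and looks roughly like this (quoted from memory, treat as approximate):

        #define fetch_and_zero(ptr) ({                  \
                typeof(*ptr) __T = *(ptr);              \
                *(ptr) = (typeof(*ptr))0;               \
                __T;                                    \
        })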
......
@@ -286,7 +286,7 @@ struct intel_gvt {
         /* scheduler scope lock, protect gvt and vgpu schedule related data */
         struct mutex sched_lock;

-        struct drm_i915_private *dev_priv;
+        struct intel_gt *gt;
         struct idr vgpu_idr;    /* vGPU IDR pool */

         struct intel_gvt_device_info device_info;
@@ -356,13 +356,14 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt);
 #define HOST_HIGH_GM_SIZE MB_TO_BYTES(384)
 #define HOST_FENCE 4

+#define gvt_to_ggtt(gvt)        ((gvt)->gt->ggtt)
+
 /* Aperture/GM space definitions for GVT device */
-#define gvt_aperture_sz(gvt)      (gvt->dev_priv->ggtt.mappable_end)
-#define gvt_aperture_pa_base(gvt) (gvt->dev_priv->ggtt.gmadr.start)
+#define gvt_aperture_sz(gvt)      gvt_to_ggtt(gvt)->mappable_end
+#define gvt_aperture_pa_base(gvt) gvt_to_ggtt(gvt)->gmadr.start

-#define gvt_ggtt_gm_sz(gvt)     (gvt->dev_priv->ggtt.vm.total)
-#define gvt_ggtt_sz(gvt) \
-        ((gvt->dev_priv->ggtt.vm.total >> PAGE_SHIFT) << 3)
+#define gvt_ggtt_gm_sz(gvt)     gvt_to_ggtt(gvt)->vm.total
+#define gvt_ggtt_sz(gvt)        (gvt_to_ggtt(gvt)->vm.total >> PAGE_SHIFT << 3)
 #define gvt_hidden_sz(gvt)      (gvt_ggtt_gm_sz(gvt) - gvt_aperture_sz(gvt))

 #define gvt_aperture_gmadr_base(gvt) (0)
@@ -374,7 +375,7 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt);
 #define gvt_hidden_gmadr_end(gvt) (gvt_hidden_gmadr_base(gvt) \
                                    + gvt_hidden_sz(gvt) - 1)

-#define gvt_fence_sz(gvt) ((gvt)->dev_priv->ggtt.num_fences)
+#define gvt_fence_sz(gvt) (gvt_to_ggtt(gvt)->num_fences)

 /* Aperture/GM space definitions for vGPU */
 #define vgpu_aperture_offset(vgpu)      ((vgpu)->gm.low_gm_node.start)
@@ -565,14 +566,14 @@ enum {
         GVT_FAILSAFE_GUEST_ERR,
 };

-static inline void mmio_hw_access_pre(struct drm_i915_private *dev_priv)
+static inline void mmio_hw_access_pre(struct intel_gt *gt)
 {
-        intel_runtime_pm_get(&dev_priv->runtime_pm);
+        intel_runtime_pm_get(gt->uncore->rpm);
 }

-static inline void mmio_hw_access_post(struct drm_i915_private *dev_priv)
+static inline void mmio_hw_access_post(struct intel_gt *gt)
 {
-        intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
+        intel_runtime_pm_put_unchecked(gt->uncore->rpm);
 }

 /**
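mmio_hw_access_pre()/post() now pin the GT's runtime PM around raw register access, and the new gvt_to_ggtt() macro centralizes the ggtt indirection behind the aperture and fence macros. Typical usage, mirroring ggtt_invalidate() in the gtt.c hunks above (sketch only):

        mmio_hw_access_pre(gt);         /* take an (unchecked) rpm wakeref */
        intel_uncore_write(gt->uncore, GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
        mmio_hw_access_post(gt);        /* drop it */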
......
This diff is collapsed.
@@ -244,8 +244,8 @@ int intel_vgpu_reg_master_irq_handler(struct intel_vgpu *vgpu,
 int intel_vgpu_reg_ier_handler(struct intel_vgpu *vgpu,
         unsigned int reg, void *p_data, unsigned int bytes)
 {
-        struct drm_i915_private *i915 = vgpu->gvt->dev_priv;
         struct intel_gvt *gvt = vgpu->gvt;
+        struct drm_i915_private *i915 = gvt->gt->i915;
         struct intel_gvt_irq_ops *ops = gvt->irq.ops;
         struct intel_gvt_irq_info *info;
         u32 ier = *(u32 *)p_data;
@@ -283,7 +283,7 @@ int intel_vgpu_reg_ier_handler(struct intel_vgpu *vgpu,
 int intel_vgpu_reg_iir_handler(struct intel_vgpu *vgpu, unsigned int reg,
         void *p_data, unsigned int bytes)
 {
-        struct drm_i915_private *i915 = vgpu->gvt->dev_priv;
+        struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
         struct intel_gvt_irq_info *info = regbase_to_irq_info(vgpu->gvt,
                                                 iir_to_regbase(reg));
         u32 iir = *(u32 *)p_data;
@@ -321,7 +321,7 @@ static struct intel_gvt_irq_map gen8_irq_map[] = {
 static void update_upstream_irq(struct intel_vgpu *vgpu,
                 struct intel_gvt_irq_info *info)
 {
-        struct drm_i915_private *i915 = vgpu->gvt->dev_priv;
+        struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
         struct intel_gvt_irq *irq = &vgpu->gvt->irq;
         struct intel_gvt_irq_map *map = irq->irq_map;
         struct intel_gvt_irq_info *up_irq_info = NULL;
@@ -540,7 +540,7 @@ static void gen8_init_irq(
         SET_BIT_INFO(irq, 4, VCS_MI_FLUSH_DW, INTEL_GVT_IRQ_INFO_GT1);
         SET_BIT_INFO(irq, 8, VCS_AS_CONTEXT_SWITCH, INTEL_GVT_IRQ_INFO_GT1);

-        if (HAS_ENGINE(gvt->dev_priv, VCS1)) {
+        if (HAS_ENGINE(gvt->gt->i915, VCS1)) {
                 SET_BIT_INFO(irq, 16, VCS2_MI_USER_INTERRUPT,
                              INTEL_GVT_IRQ_INFO_GT1);
                 SET_BIT_INFO(irq, 20, VCS2_MI_FLUSH_DW,
@@ -572,7 +572,7 @@ static void gen8_init_irq(
                 SET_BIT_INFO(irq, 22, DP_C_HOTPLUG, INTEL_GVT_IRQ_INFO_PCH);
                 SET_BIT_INFO(irq, 23, DP_D_HOTPLUG, INTEL_GVT_IRQ_INFO_PCH);

-                if (IS_BROADWELL(gvt->dev_priv)) {
+                if (IS_BROADWELL(gvt->gt->i915)) {
                         SET_BIT_INFO(irq, 25, AUX_CHANNEL_B, INTEL_GVT_IRQ_INFO_PCH);
                         SET_BIT_INFO(irq, 26, AUX_CHANNEL_C, INTEL_GVT_IRQ_INFO_PCH);
                         SET_BIT_INFO(irq, 27, AUX_CHANNEL_D, INTEL_GVT_IRQ_INFO_PCH);
@@ -585,7 +585,7 @@ static void gen8_init_irq(
                 SET_BIT_INFO(irq, 4, PRIMARY_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C);
                 SET_BIT_INFO(irq, 5, SPRITE_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C);
-        } else if (INTEL_GEN(gvt->dev_priv) >= 9) {
+        } else if (INTEL_GEN(gvt->gt->i915) >= 9) {
                 SET_BIT_INFO(irq, 25, AUX_CHANNEL_B, INTEL_GVT_IRQ_INFO_DE_PORT);
                 SET_BIT_INFO(irq, 26, AUX_CHANNEL_C, INTEL_GVT_IRQ_INFO_DE_PORT);
                 SET_BIT_INFO(irq, 27, AUX_CHANNEL_D, INTEL_GVT_IRQ_INFO_DE_PORT);
@@ -622,7 +622,7 @@ static struct intel_gvt_irq_ops gen8_irq_ops = {
 void intel_vgpu_trigger_virtual_event(struct intel_vgpu *vgpu,
         enum intel_gvt_event_type event)
 {
-        struct drm_i915_private *i915 = vgpu->gvt->dev_priv;
+        struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
         struct intel_gvt *gvt = vgpu->gvt;
         struct intel_gvt_irq *irq = &gvt->irq;
         gvt_event_virt_handler_t handler;
......
@@ -150,7 +150,7 @@ static bool kvmgt_guest_exit(struct kvmgt_guest_info *info);
 static void gvt_unpin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
                 unsigned long size)
 {
-        struct drm_i915_private *i915 = vgpu->gvt->dev_priv;
+        struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
         int total_pages;
         int npage;
         int ret;
@@ -218,7 +218,7 @@ static int gvt_pin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
 static int gvt_dma_map_page(struct intel_vgpu *vgpu, unsigned long gfn,
                 dma_addr_t *dma_addr, unsigned long size)
 {
-        struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
+        struct device *dev = &vgpu->gvt->gt->i915->drm.pdev->dev;
         struct page *page = NULL;
         int ret;
@@ -241,7 +241,7 @@ static int gvt_dma_map_page(struct intel_vgpu *vgpu, unsigned long gfn,
 static void gvt_dma_unmap_page(struct intel_vgpu *vgpu, unsigned long gfn,
                 dma_addr_t dma_addr, unsigned long size)
 {
-        struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
+        struct device *dev = &vgpu->gvt->gt->i915->drm.pdev->dev;

         dma_unmap_page(dev, dma_addr, size, PCI_DMA_BIDIRECTIONAL);
         gvt_unpin_guest_page(vgpu, gfn, size);
@@ -855,7 +855,7 @@ static void intel_vgpu_release_msi_eventfd_ctx(struct intel_vgpu *vgpu)
 static void __intel_vgpu_release(struct intel_vgpu *vgpu)
 {
         struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
-        struct drm_i915_private *i915 = vgpu->gvt->dev_priv;
+        struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
         struct kvmgt_guest_info *info;
         int ret;
@@ -963,7 +963,7 @@ static int intel_vgpu_aperture_rw(struct intel_vgpu *vgpu, u64 off,
                 return -EINVAL;
         }

-        aperture_va = io_mapping_map_wc(&vgpu->gvt->dev_priv->ggtt.iomap,
+        aperture_va = io_mapping_map_wc(&vgpu->gvt->gt->ggtt->iomap,
                                         ALIGN_DOWN(off, PAGE_SIZE),
                                         count + offset_in_page(off));
         if (!aperture_va)
......
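In kvmgt.c both the DMA-mapping device and the aperture io_mapping are now derived through the GT. A sketch of the aperture mapping step, assuming the kernel's io-mapping API of this era (io_mapping_map_wc()/io_mapping_unmap()); the error code and the access in the middle are placeholders:

/* Map the requested aperture window write-combined: align the offset
 * down to a page and extend the length by the in-page remainder,
 * exactly as intel_vgpu_aperture_rw() does above.
 */
static int map_aperture_window(struct intel_vgpu *vgpu, u64 off, size_t count)
{
	void __iomem *aperture_va;

	aperture_va = io_mapping_map_wc(&vgpu->gvt->gt->ggtt->iomap,
					ALIGN_DOWN(off, PAGE_SIZE),
					count + offset_in_page(off));
	if (!aperture_va)
		return -EIO; /* assumed error code */

	/* ... read or write through aperture_va + offset_in_page(off) ... */

	io_mapping_unmap(aperture_va);
	return 0;
}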
...@@ -102,8 +102,8 @@ static void failsafe_emulate_mmio_rw(struct intel_vgpu *vgpu, u64 pa, ...@@ -102,8 +102,8 @@ static void failsafe_emulate_mmio_rw(struct intel_vgpu *vgpu, u64 pa,
int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, u64 pa, int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, u64 pa,
void *p_data, unsigned int bytes) void *p_data, unsigned int bytes)
{ {
struct drm_i915_private *i915 = vgpu->gvt->dev_priv;
struct intel_gvt *gvt = vgpu->gvt; struct intel_gvt *gvt = vgpu->gvt;
struct drm_i915_private *i915 = gvt->gt->i915;
unsigned int offset = 0; unsigned int offset = 0;
int ret = -EINVAL; int ret = -EINVAL;
...@@ -177,8 +177,8 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, u64 pa, ...@@ -177,8 +177,8 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, u64 pa,
int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, u64 pa, int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, u64 pa,
void *p_data, unsigned int bytes) void *p_data, unsigned int bytes)
{ {
struct drm_i915_private *i915 = vgpu->gvt->dev_priv;
struct intel_gvt *gvt = vgpu->gvt; struct intel_gvt *gvt = vgpu->gvt;
struct drm_i915_private *i915 = gvt->gt->i915;
unsigned int offset = 0; unsigned int offset = 0;
int ret = -EINVAL; int ret = -EINVAL;
...@@ -251,7 +251,7 @@ void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu, bool dmlr) ...@@ -251,7 +251,7 @@ void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu, bool dmlr)
/* set the bit 0:2(Core C-State ) to C0 */ /* set the bit 0:2(Core C-State ) to C0 */
vgpu_vreg_t(vgpu, GEN6_GT_CORE_STATUS) = 0; vgpu_vreg_t(vgpu, GEN6_GT_CORE_STATUS) = 0;
if (IS_BROXTON(vgpu->gvt->dev_priv)) { if (IS_BROXTON(vgpu->gvt->gt->i915)) {
vgpu_vreg_t(vgpu, BXT_P_CR_GT_DISP_PWRON) &= vgpu_vreg_t(vgpu, BXT_P_CR_GT_DISP_PWRON) &=
~(BIT(0) | BIT(1)); ~(BIT(0) | BIT(1));
vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY0)) &= vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY0)) &=
......
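mmio.c keeps a local drm_i915_private in the emulation entry points but now derives it from the gvt handle, which is why the declaration order flips in the two hunks above. Platform predicates such as IS_BROXTON() take the i915 device, so they are fed gvt->gt->i915. A compressed sketch; emulate_mmio_access() is a made-up name standing in for the real read/write entry points:

static int emulate_mmio_access(struct intel_vgpu *vgpu)
{
	struct intel_gvt *gvt = vgpu->gvt;
	struct drm_i915_private *i915 = gvt->gt->i915; /* was gvt->dev_priv */

	if (IS_BROXTON(i915)) {
		/* Broxton-only vreg fixups, as in the reset hunk above */
	}
	return 0;
}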
...@@ -157,12 +157,13 @@ static u32 gen9_mocs_mmio_offset_list[] = { ...@@ -157,12 +157,13 @@ static u32 gen9_mocs_mmio_offset_list[] = {
[VECS0] = 0xcb00, [VECS0] = 0xcb00,
}; };
static void load_render_mocs(struct drm_i915_private *dev_priv) static void load_render_mocs(const struct intel_engine_cs *engine)
{ {
struct intel_gvt *gvt = dev_priv->gvt; struct intel_gvt *gvt = engine->i915->gvt;
i915_reg_t offset; struct intel_uncore *uncore = engine->uncore;
u32 cnt = gvt->engine_mmio_list.mocs_mmio_offset_list_cnt; u32 cnt = gvt->engine_mmio_list.mocs_mmio_offset_list_cnt;
u32 *regs = gvt->engine_mmio_list.mocs_mmio_offset_list; u32 *regs = gvt->engine_mmio_list.mocs_mmio_offset_list;
i915_reg_t offset;
int ring_id, i; int ring_id, i;
/* Platform doesn't have mocs mmios. */ /* Platform doesn't have mocs mmios. */
...@@ -170,12 +171,13 @@ static void load_render_mocs(struct drm_i915_private *dev_priv) ...@@ -170,12 +171,13 @@ static void load_render_mocs(struct drm_i915_private *dev_priv)
return; return;
for (ring_id = 0; ring_id < cnt; ring_id++) { for (ring_id = 0; ring_id < cnt; ring_id++) {
if (!HAS_ENGINE(dev_priv, ring_id)) if (!HAS_ENGINE(engine->i915, ring_id))
continue; continue;
offset.reg = regs[ring_id]; offset.reg = regs[ring_id];
for (i = 0; i < GEN9_MOCS_SIZE; i++) { for (i = 0; i < GEN9_MOCS_SIZE; i++) {
gen9_render_mocs.control_table[ring_id][i] = gen9_render_mocs.control_table[ring_id][i] =
I915_READ_FW(offset); intel_uncore_read_fw(uncore, offset);
offset.reg += 4; offset.reg += 4;
} }
} }
...@@ -183,7 +185,7 @@ static void load_render_mocs(struct drm_i915_private *dev_priv) ...@@ -183,7 +185,7 @@ static void load_render_mocs(struct drm_i915_private *dev_priv)
offset.reg = 0xb020; offset.reg = 0xb020;
for (i = 0; i < GEN9_MOCS_SIZE / 2; i++) { for (i = 0; i < GEN9_MOCS_SIZE / 2; i++) {
gen9_render_mocs.l3cc_table[i] = gen9_render_mocs.l3cc_table[i] =
I915_READ_FW(offset); intel_uncore_read_fw(uncore, offset);
offset.reg += 4; offset.reg += 4;
} }
gen9_render_mocs.initialized = true; gen9_render_mocs.initialized = true;
...@@ -410,7 +412,7 @@ static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next, ...@@ -410,7 +412,7 @@ static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next,
return; return;
if (!pre && !gen9_render_mocs.initialized) if (!pre && !gen9_render_mocs.initialized)
load_render_mocs(engine->i915); load_render_mocs(engine);
offset.reg = regs[engine->id]; offset.reg = regs[engine->id];
for (i = 0; i < GEN9_MOCS_SIZE; i++) { for (i = 0; i < GEN9_MOCS_SIZE; i++) {
...@@ -577,7 +579,7 @@ void intel_gvt_init_engine_mmio_context(struct intel_gvt *gvt) ...@@ -577,7 +579,7 @@ void intel_gvt_init_engine_mmio_context(struct intel_gvt *gvt)
{ {
struct engine_mmio *mmio; struct engine_mmio *mmio;
if (INTEL_GEN(gvt->dev_priv) >= 9) { if (INTEL_GEN(gvt->gt->i915) >= 9) {
gvt->engine_mmio_list.mmio = gen9_engine_mmio_list; gvt->engine_mmio_list.mmio = gen9_engine_mmio_list;
gvt->engine_mmio_list.tlb_mmio_offset_list = gen8_tlb_mmio_offset_list; gvt->engine_mmio_list.tlb_mmio_offset_list = gen8_tlb_mmio_offset_list;
gvt->engine_mmio_list.tlb_mmio_offset_list_cnt = ARRAY_SIZE(gen8_tlb_mmio_offset_list); gvt->engine_mmio_list.tlb_mmio_offset_list_cnt = ARRAY_SIZE(gen8_tlb_mmio_offset_list);
......
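The MOCS snapshot is one of the few places GVT issues raw register reads, so load_render_mocs() now takes the engine and goes through its intel_uncore: intel_uncore_read_fw() is the explicit-uncore replacement for the old I915_READ_FW() macro, which needed an implicit dev_priv in scope. A trimmed sketch of the read loop; snapshot_mocs() is a hypothetical helper, but the calls and types all appear in the hunk:

/* Snapshot 'count' consecutive MOCS registers starting at 'base'
 * through the engine's uncore, without a forcewake wrapper.
 */
static void snapshot_mocs(const struct intel_engine_cs *engine,
			  u32 *table, u32 base, int count)
{
	i915_reg_t offset;
	int i;

	offset.reg = base;
	for (i = 0; i < count; i++) {
		table[i] = intel_uncore_read_fw(engine->uncore, offset);
		offset.reg += 4;
	}
}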
...@@ -39,7 +39,7 @@ static bool vgpu_has_pending_workload(struct intel_vgpu *vgpu) ...@@ -39,7 +39,7 @@ static bool vgpu_has_pending_workload(struct intel_vgpu *vgpu)
enum intel_engine_id i; enum intel_engine_id i;
struct intel_engine_cs *engine; struct intel_engine_cs *engine;
for_each_engine(engine, vgpu->gvt->dev_priv, i) { for_each_engine(engine, vgpu->gvt->gt, i) {
if (!list_empty(workload_q_head(vgpu, engine))) if (!list_empty(workload_q_head(vgpu, engine)))
return true; return true;
} }
...@@ -152,8 +152,8 @@ static void try_to_schedule_next_vgpu(struct intel_gvt *gvt) ...@@ -152,8 +152,8 @@ static void try_to_schedule_next_vgpu(struct intel_gvt *gvt)
scheduler->need_reschedule = true; scheduler->need_reschedule = true;
/* still have uncompleted workload? */ /* still have uncompleted workload? */
for_each_engine(engine, gvt->dev_priv, i) { for_each_engine(engine, gvt->gt, i) {
if (scheduler->current_workload[i]) if (scheduler->current_workload[engine->id])
return; return;
} }
...@@ -169,8 +169,8 @@ static void try_to_schedule_next_vgpu(struct intel_gvt *gvt) ...@@ -169,8 +169,8 @@ static void try_to_schedule_next_vgpu(struct intel_gvt *gvt)
scheduler->need_reschedule = false; scheduler->need_reschedule = false;
/* wake up workload dispatch thread */ /* wake up workload dispatch thread */
for_each_engine(engine, gvt->dev_priv, i) for_each_engine(engine, gvt->gt, i)
wake_up(&scheduler->waitq[i]); wake_up(&scheduler->waitq[engine->id]);
} }
static struct intel_vgpu *find_busy_vgpu(struct gvt_sched_data *sched_data) static struct intel_vgpu *find_busy_vgpu(struct gvt_sched_data *sched_data)
...@@ -445,7 +445,7 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu) ...@@ -445,7 +445,7 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
struct intel_gvt_workload_scheduler *scheduler = struct intel_gvt_workload_scheduler *scheduler =
&vgpu->gvt->scheduler; &vgpu->gvt->scheduler;
struct vgpu_sched_data *vgpu_data = vgpu->sched_data; struct vgpu_sched_data *vgpu_data = vgpu->sched_data;
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
struct intel_engine_cs *engine; struct intel_engine_cs *engine;
enum intel_engine_id id; enum intel_engine_id id;
...@@ -468,7 +468,7 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu) ...@@ -468,7 +468,7 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
intel_runtime_pm_get(&dev_priv->runtime_pm); intel_runtime_pm_get(&dev_priv->runtime_pm);
spin_lock_bh(&scheduler->mmio_context_lock); spin_lock_bh(&scheduler->mmio_context_lock);
for_each_engine(engine, &vgpu->gvt->dev_priv->gt, id) { for_each_engine(engine, vgpu->gvt->gt, id) {
if (scheduler->engine_owner[engine->id] == vgpu) { if (scheduler->engine_owner[engine->id] == vgpu) {
intel_gvt_switch_mmio(vgpu, NULL, engine); intel_gvt_switch_mmio(vgpu, NULL, engine);
scheduler->engine_owner[engine->id] = NULL; scheduler->engine_owner[engine->id] = NULL;
......
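With the iterator rebased onto intel_gt, for_each_engine() walks the GT's engine array, and the scheduler hunks switch their per-engine arrays from the bare loop counter to engine->id, so the indexing is explicitly tied to the engine rather than the iteration order. A sketch of the wake-up loop under that assumption:

struct intel_engine_cs *engine;
enum intel_engine_id i;

/* Index per-engine scheduler state by engine->id, not the counter. */
for_each_engine(engine, gvt->gt, i)
	wake_up(&scheduler->waitq[engine->id]);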
...@@ -84,7 +84,7 @@ static void update_shadow_pdps(struct intel_vgpu_workload *workload) ...@@ -84,7 +84,7 @@ static void update_shadow_pdps(struct intel_vgpu_workload *workload)
static void sr_oa_regs(struct intel_vgpu_workload *workload, static void sr_oa_regs(struct intel_vgpu_workload *workload,
u32 *reg_state, bool save) u32 *reg_state, bool save)
{ {
struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv; struct drm_i915_private *dev_priv = workload->vgpu->gvt->gt->i915;
u32 ctx_oactxctrl = dev_priv->perf.ctx_oactxctrl_offset; u32 ctx_oactxctrl = dev_priv->perf.ctx_oactxctrl_offset;
u32 ctx_flexeu0 = dev_priv->perf.ctx_flexeu0_offset; u32 ctx_flexeu0 = dev_priv->perf.ctx_flexeu0_offset;
int i = 0; int i = 0;
...@@ -181,7 +181,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload) ...@@ -181,7 +181,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
context_page_num = workload->engine->context_size; context_page_num = workload->engine->context_size;
context_page_num = context_page_num >> PAGE_SHIFT; context_page_num = context_page_num >> PAGE_SHIFT;
if (IS_BROADWELL(gvt->dev_priv) && workload->engine->id == RCS0) if (IS_BROADWELL(gvt->gt->i915) && workload->engine->id == RCS0)
context_page_num = 19; context_page_num = 19;
i = 2; i = 2;
...@@ -868,7 +868,7 @@ void intel_vgpu_clean_workloads(struct intel_vgpu *vgpu, ...@@ -868,7 +868,7 @@ void intel_vgpu_clean_workloads(struct intel_vgpu *vgpu,
intel_engine_mask_t engine_mask) intel_engine_mask_t engine_mask)
{ {
struct intel_vgpu_submission *s = &vgpu->submission; struct intel_vgpu_submission *s = &vgpu->submission;
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
struct intel_engine_cs *engine; struct intel_engine_cs *engine;
struct intel_vgpu_workload *pos, *n; struct intel_vgpu_workload *pos, *n;
intel_engine_mask_t tmp; intel_engine_mask_t tmp;
...@@ -1065,7 +1065,7 @@ void intel_gvt_clean_workload_scheduler(struct intel_gvt *gvt) ...@@ -1065,7 +1065,7 @@ void intel_gvt_clean_workload_scheduler(struct intel_gvt *gvt)
gvt_dbg_core("clean workload scheduler\n"); gvt_dbg_core("clean workload scheduler\n");
for_each_engine(engine, gvt->dev_priv, i) { for_each_engine(engine, gvt->gt, i) {
atomic_notifier_chain_unregister( atomic_notifier_chain_unregister(
&engine->context_status_notifier, &engine->context_status_notifier,
&gvt->shadow_ctx_notifier_block[i]); &gvt->shadow_ctx_notifier_block[i]);
...@@ -1084,7 +1084,7 @@ int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt) ...@@ -1084,7 +1084,7 @@ int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt)
init_waitqueue_head(&scheduler->workload_complete_wq); init_waitqueue_head(&scheduler->workload_complete_wq);
for_each_engine(engine, gvt->dev_priv, i) { for_each_engine(engine, gvt->gt, i) {
init_waitqueue_head(&scheduler->waitq[i]); init_waitqueue_head(&scheduler->waitq[i]);
scheduler->thread[i] = kthread_run(workload_thread, engine, scheduler->thread[i] = kthread_run(workload_thread, engine,
...@@ -1142,7 +1142,7 @@ void intel_vgpu_clean_submission(struct intel_vgpu *vgpu) ...@@ -1142,7 +1142,7 @@ void intel_vgpu_clean_submission(struct intel_vgpu *vgpu)
intel_vgpu_select_submission_ops(vgpu, ALL_ENGINES, 0); intel_vgpu_select_submission_ops(vgpu, ALL_ENGINES, 0);
i915_context_ppgtt_root_restore(s, i915_vm_to_ppgtt(s->shadow[0]->vm)); i915_context_ppgtt_root_restore(s, i915_vm_to_ppgtt(s->shadow[0]->vm));
for_each_engine(engine, vgpu->gvt->dev_priv, id) for_each_engine(engine, vgpu->gvt->gt, id)
intel_context_unpin(s->shadow[id]); intel_context_unpin(s->shadow[id]);
kmem_cache_destroy(s->workloads); kmem_cache_destroy(s->workloads);
...@@ -1199,7 +1199,7 @@ i915_context_ppgtt_root_save(struct intel_vgpu_submission *s, ...@@ -1199,7 +1199,7 @@ i915_context_ppgtt_root_save(struct intel_vgpu_submission *s,
*/ */
int intel_vgpu_setup_submission(struct intel_vgpu *vgpu) int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
{ {
struct drm_i915_private *i915 = vgpu->gvt->dev_priv; struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
struct intel_vgpu_submission *s = &vgpu->submission; struct intel_vgpu_submission *s = &vgpu->submission;
struct intel_engine_cs *engine; struct intel_engine_cs *engine;
struct i915_ppgtt *ppgtt; struct i915_ppgtt *ppgtt;
...@@ -1212,7 +1212,7 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu) ...@@ -1212,7 +1212,7 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
i915_context_ppgtt_root_save(s, ppgtt); i915_context_ppgtt_root_save(s, ppgtt);
for_each_engine(engine, &i915->gt, i) { for_each_engine(engine, vgpu->gvt->gt, i) {
struct intel_context *ce; struct intel_context *ce;
INIT_LIST_HEAD(&s->workload_q_head[i]); INIT_LIST_HEAD(&s->workload_q_head[i]);
...@@ -1264,7 +1264,7 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu) ...@@ -1264,7 +1264,7 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
out_shadow_ctx: out_shadow_ctx:
i915_context_ppgtt_root_restore(s, ppgtt); i915_context_ppgtt_root_restore(s, ppgtt);
for_each_engine(engine, &i915->gt, i) { for_each_engine(engine, vgpu->gvt->gt, i) {
if (IS_ERR(s->shadow[i])) if (IS_ERR(s->shadow[i]))
break; break;
...@@ -1291,7 +1291,7 @@ int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu, ...@@ -1291,7 +1291,7 @@ int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu,
intel_engine_mask_t engine_mask, intel_engine_mask_t engine_mask,
unsigned int interface) unsigned int interface)
{ {
struct drm_i915_private *i915 = vgpu->gvt->dev_priv; struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
struct intel_vgpu_submission *s = &vgpu->submission; struct intel_vgpu_submission *s = &vgpu->submission;
const struct intel_vgpu_submission_ops *ops[] = { const struct intel_vgpu_submission_ops *ops[] = {
[INTEL_VGPU_EXECLIST_SUBMISSION] = [INTEL_VGPU_EXECLIST_SUBMISSION] =
......
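intel_vgpu_setup_submission() builds one shadow intel_context per engine by walking the GVT-owned GT instead of &i915->gt, and unwinds over the same iterator on failure, stopping at the first slot that holds an error pointer. A condensed sketch of that create/unwind shape; create_shadow_context() is a hypothetical stand-in for the real intel_context setup:

for_each_engine(engine, vgpu->gvt->gt, i) {
	INIT_LIST_HEAD(&s->workload_q_head[i]);
	s->shadow[i] = create_shadow_context(engine); /* hypothetical */
	if (IS_ERR(s->shadow[i]))
		goto out_shadow_ctx;
}
return 0;

out_shadow_ctx:
for_each_engine(engine, vgpu->gvt->gt, i) {
	if (IS_ERR(s->shadow[i]))
		break; /* nothing past the first failure was created */
	intel_context_unpin(s->shadow[i]);
}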
...@@ -37,7 +37,7 @@ ...@@ -37,7 +37,7 @@
void populate_pvinfo_page(struct intel_vgpu *vgpu) void populate_pvinfo_page(struct intel_vgpu *vgpu)
{ {
struct drm_i915_private *i915 = vgpu->gvt->dev_priv; struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
/* setup the ballooning information */ /* setup the ballooning information */
vgpu_vreg64_t(vgpu, vgtif_reg(magic)) = VGT_MAGIC; vgpu_vreg64_t(vgpu, vgtif_reg(magic)) = VGT_MAGIC;
vgpu_vreg_t(vgpu, vgtif_reg(version_major)) = 1; vgpu_vreg_t(vgpu, vgtif_reg(version_major)) = 1;
...@@ -149,10 +149,10 @@ int intel_gvt_init_vgpu_types(struct intel_gvt *gvt) ...@@ -149,10 +149,10 @@ int intel_gvt_init_vgpu_types(struct intel_gvt *gvt)
gvt->types[i].avail_instance = min(low_avail / vgpu_types[i].low_mm, gvt->types[i].avail_instance = min(low_avail / vgpu_types[i].low_mm,
high_avail / vgpu_types[i].high_mm); high_avail / vgpu_types[i].high_mm);
if (IS_GEN(gvt->dev_priv, 8)) if (IS_GEN(gvt->gt->i915, 8))
sprintf(gvt->types[i].name, "GVTg_V4_%s", sprintf(gvt->types[i].name, "GVTg_V4_%s",
vgpu_types[i].name); vgpu_types[i].name);
else if (IS_GEN(gvt->dev_priv, 9)) else if (IS_GEN(gvt->gt->i915, 9))
sprintf(gvt->types[i].name, "GVTg_V5_%s", sprintf(gvt->types[i].name, "GVTg_V5_%s",
vgpu_types[i].name); vgpu_types[i].name);
...@@ -271,8 +271,8 @@ void intel_gvt_release_vgpu(struct intel_vgpu *vgpu) ...@@ -271,8 +271,8 @@ void intel_gvt_release_vgpu(struct intel_vgpu *vgpu)
*/ */
void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu) void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
{ {
struct drm_i915_private *i915 = vgpu->gvt->dev_priv;
struct intel_gvt *gvt = vgpu->gvt; struct intel_gvt *gvt = vgpu->gvt;
struct drm_i915_private *i915 = gvt->gt->i915;
mutex_lock(&vgpu->vgpu_lock); mutex_lock(&vgpu->vgpu_lock);
......
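vgpu.c shows why an i915 pointer is still needed at all: PV info, type naming and vGPU destruction are per-device rather than per-GT concerns. The type names keep encoding the host generation, now probed through gt->i915; a sketch of that selection, matching the hunk above in shape:

if (IS_GEN(gvt->gt->i915, 8))
	sprintf(gvt->types[i].name, "GVTg_V4_%s", vgpu_types[i].name);
else if (IS_GEN(gvt->gt->i915, 9))
	sprintf(gvt->types[i].name, "GVTg_V5_%s", vgpu_types[i].name);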