Commit c3b5a843 authored by fred gao, committed by Zhenyu Wang

drm/i915/gvt: Enable gfx virtualization for CFL

Use INTEL_GEN() to simplify the per-platform checks for SKL+ platforms.

v2:
- split the enabling code out into this final patch so that any regression can be identified.

Cc: Zhenyu Wang <zhenyuw@linux.intel.com>
Reviewed-by: Zhenyu Wang <zhenyuw@linux.intel.com>
Signed-off-by: Fei Jiang <fei.jiang@intel.com>
Signed-off-by: fred gao <fred.gao@intel.com>
Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
parent 5cd02703
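
The changes below all follow one pattern, so a minimal, self-contained sketch may help before reading the diff. The enum, struct, and helper functions here are hypothetical stand-ins for the kernel's struct drm_i915_private and its platform macros; only the shape of the refactoring is the point. An open-coded list of Gen9 platforms is replaced by a single generation test, which is what lets Coffee Lake, itself a Gen9 platform, take the right paths without being named at every call site:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins: in the kernel, IS_SKYLAKE()/IS_KABYLAKE()/
 * IS_BROXTON() and INTEL_GEN() are macros querying drm_i915_private. */
enum plat { PLAT_SKL, PLAT_KBL, PLAT_BXT, PLAT_CFL };
struct dev_priv { enum plat platform; int gen; };

static bool IS_SKYLAKE(const struct dev_priv *d)  { return d->platform == PLAT_SKL; }
static bool IS_KABYLAKE(const struct dev_priv *d) { return d->platform == PLAT_KBL; }
static bool IS_BROXTON(const struct dev_priv *d)  { return d->platform == PLAT_BXT; }
static int INTEL_GEN(const struct dev_priv *d)    { return d->gen; }

int main(void)
{
	/* Coffee Lake is a Gen9 platform. */
	struct dev_priv cfl = { .platform = PLAT_CFL, .gen = 9 };

	/* Before: the explicit platform list silently misses CFL ... */
	bool old_check = IS_SKYLAKE(&cfl) || IS_KABYLAKE(&cfl) || IS_BROXTON(&cfl);

	/* ... after: one generation test covers every Gen9+ platform. */
	bool new_check = INTEL_GEN(&cfl) >= 9;

	printf("old check: %d, new check: %d\n", old_check, new_check);
	return 0;
}

Note that not every site in the diff can use the generation test: display hotplug and PCODE mailbox behaviour depend on the platform and PCH rather than the render generation (Broxton, for instance, is Gen9 but has no SPT-style south display engine), which is why those sites extend the explicit SKL/KBL list with IS_COFFEELAKE() instead of switching to INTEL_GEN().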
@@ -901,7 +901,8 @@ static int cmd_reg_handler(struct parser_exec_state *s,
 	 * It's good enough to support initializing mmio by lri command in
 	 * vgpu inhibit context on KBL.
 	 */
-	if (IS_KABYLAKE(s->vgpu->gvt->dev_priv) &&
+	if ((IS_KABYLAKE(s->vgpu->gvt->dev_priv)
+		|| IS_COFFEELAKE(s->vgpu->gvt->dev_priv)) &&
 	    intel_gvt_mmio_is_in_ctx(gvt, offset) &&
 	    !strncmp(cmd, "lri", 3)) {
 		intel_gvt_hypervisor_read_gpa(s->vgpu,
@@ -1280,9 +1281,7 @@ static int gen8_check_mi_display_flip(struct parser_exec_state *s,
 	if (!info->async_flip)
 		return 0;
-	if (IS_SKYLAKE(dev_priv)
-		|| IS_KABYLAKE(dev_priv)
-		|| IS_BROXTON(dev_priv)) {
+	if (INTEL_GEN(dev_priv) >= 9) {
 		stride = vgpu_vreg_t(s->vgpu, info->stride_reg) & GENMASK(9, 0);
 		tile = (vgpu_vreg_t(s->vgpu, info->ctrl_reg) &
 				GENMASK(12, 10)) >> 10;
@@ -1310,9 +1309,7 @@ static int gen8_update_plane_mmio_from_mi_display_flip(
 	set_mask_bits(&vgpu_vreg_t(vgpu, info->surf_reg), GENMASK(31, 12),
 		      info->surf_val << 12);
-	if (IS_SKYLAKE(dev_priv)
-		|| IS_KABYLAKE(dev_priv)
-		|| IS_BROXTON(dev_priv)) {
+	if (INTEL_GEN(dev_priv) >= 9) {
 		set_mask_bits(&vgpu_vreg_t(vgpu, info->stride_reg), GENMASK(9, 0),
 			      info->stride_val);
 		set_mask_bits(&vgpu_vreg_t(vgpu, info->ctrl_reg), GENMASK(12, 10),
@@ -1336,9 +1333,7 @@ static int decode_mi_display_flip(struct parser_exec_state *s,
 	if (IS_BROADWELL(dev_priv))
 		return gen8_decode_mi_display_flip(s, info);
-	if (IS_SKYLAKE(dev_priv)
-		|| IS_KABYLAKE(dev_priv)
-		|| IS_BROXTON(dev_priv))
+	if (INTEL_GEN(dev_priv) >= 9)
 		return skl_decode_mi_display_flip(s, info);
 	return -ENODEV;
...
@@ -198,7 +198,8 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
 			SDE_PORTC_HOTPLUG_CPT |
 			SDE_PORTD_HOTPLUG_CPT);
-	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) ||
+	    IS_COFFEELAKE(dev_priv)) {
 		vgpu_vreg_t(vgpu, SDEISR) &= ~(SDE_PORTA_HOTPLUG_SPT |
 				SDE_PORTE_HOTPLUG_SPT);
 		vgpu_vreg_t(vgpu, SKL_FUSE_STATUS) |=
@@ -273,7 +274,8 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
 		vgpu_vreg_t(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDID_DETECTED;
 	}
-	if ((IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) &&
+	if ((IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) ||
+	     IS_COFFEELAKE(dev_priv)) &&
 			intel_vgpu_has_monitor_on_port(vgpu, PORT_E)) {
 		vgpu_vreg_t(vgpu, SDEISR) |= SDE_PORTE_HOTPLUG_SPT;
 	}
@@ -453,7 +455,8 @@ void intel_vgpu_clean_display(struct intel_vgpu *vgpu)
 {
 	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
-	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
+	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) ||
+	    IS_COFFEELAKE(dev_priv))
 		clean_virtual_dp_monitor(vgpu, PORT_D);
 	else
 		clean_virtual_dp_monitor(vgpu, PORT_B);
@@ -476,7 +479,8 @@ int intel_vgpu_init_display(struct intel_vgpu *vgpu, u64 resolution)
 	intel_vgpu_init_i2c_edid(vgpu);
-	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
+	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) ||
+	    IS_COFFEELAKE(dev_priv))
 		return setup_virtual_dp_monitor(vgpu, PORT_D, GVT_DP_D,
 						resolution);
 	else
...
@@ -163,9 +163,7 @@ static struct drm_i915_gem_object *vgpu_create_gem(struct drm_device *dev,
 	obj->read_domains = I915_GEM_DOMAIN_GTT;
 	obj->write_domain = 0;
-	if (IS_SKYLAKE(dev_priv)
-		|| IS_KABYLAKE(dev_priv)
-		|| IS_BROXTON(dev_priv)) {
+	if (INTEL_GEN(dev_priv) >= 9) {
 		unsigned int tiling_mode = 0;
 		unsigned int stride = 0;
...
@@ -151,9 +151,7 @@ static u32 intel_vgpu_get_stride(struct intel_vgpu *vgpu, int pipe,
 	u32 stride_reg = vgpu_vreg_t(vgpu, DSPSTRIDE(pipe)) & stride_mask;
 	u32 stride = stride_reg;
-	if (IS_SKYLAKE(dev_priv)
-		|| IS_KABYLAKE(dev_priv)
-		|| IS_BROXTON(dev_priv)) {
+	if (INTEL_GEN(dev_priv) >= 9) {
 		switch (tiled) {
 		case PLANE_CTL_TILED_LINEAR:
 			stride = stride_reg * 64;
@@ -217,9 +215,7 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
 	if (!plane->enabled)
 		return -ENODEV;
-	if (IS_SKYLAKE(dev_priv)
-		|| IS_KABYLAKE(dev_priv)
-		|| IS_BROXTON(dev_priv)) {
+	if (INTEL_GEN(dev_priv) >= 9) {
 		plane->tiled = val & PLANE_CTL_TILED_MASK;
 		fmt = skl_format_to_drm(
 			val & PLANE_CTL_FORMAT_MASK,
@@ -260,9 +256,7 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
 	}
 	plane->stride = intel_vgpu_get_stride(vgpu, pipe, plane->tiled,
-		(IS_SKYLAKE(dev_priv)
-		|| IS_KABYLAKE(dev_priv)
-		|| IS_BROXTON(dev_priv)) ?
+		(INTEL_GEN(dev_priv) >= 9) ?
 		(_PRI_PLANE_STRIDE_MASK >> 6) :
 		_PRI_PLANE_STRIDE_MASK, plane->bpp);
...
@@ -283,9 +283,7 @@ static int mul_force_wake_write(struct intel_vgpu *vgpu,
 	old = vgpu_vreg(vgpu, offset);
 	new = CALC_MODE_MASK_REG(old, *(u32 *)p_data);
-	if (IS_SKYLAKE(vgpu->gvt->dev_priv)
-		|| IS_KABYLAKE(vgpu->gvt->dev_priv)
-		|| IS_BROXTON(vgpu->gvt->dev_priv)) {
+	if (INTEL_GEN(vgpu->gvt->dev_priv) >= 9) {
 		switch (offset) {
 		case FORCEWAKE_RENDER_GEN9_REG:
 			ack_reg_offset = FORCEWAKE_ACK_RENDER_GEN9_REG;
@@ -891,9 +889,7 @@ static int dp_aux_ch_ctl_mmio_write(struct intel_vgpu *vgpu,
 	write_vreg(vgpu, offset, p_data, bytes);
 	data = vgpu_vreg(vgpu, offset);
-	if ((IS_SKYLAKE(vgpu->gvt->dev_priv)
-		|| IS_KABYLAKE(vgpu->gvt->dev_priv)
-		|| IS_BROXTON(vgpu->gvt->dev_priv))
+	if ((INTEL_GEN(vgpu->gvt->dev_priv) >= 9)
 		&& offset != _REG_SKL_DP_AUX_CH_CTL(port_index)) {
 		/* SKL DPB/C/D aux ctl register changed */
 		return 0;
@@ -1409,7 +1405,8 @@ static int mailbox_write(struct intel_vgpu *vgpu, unsigned int offset,
 	switch (cmd) {
 	case GEN9_PCODE_READ_MEM_LATENCY:
 		if (IS_SKYLAKE(vgpu->gvt->dev_priv)
-			|| IS_KABYLAKE(vgpu->gvt->dev_priv)) {
+			|| IS_KABYLAKE(vgpu->gvt->dev_priv)
+			|| IS_COFFEELAKE(vgpu->gvt->dev_priv)) {
 			/**
 			 * "Read memory latency" command on gen9.
 			 * Below memory latency values are read
@@ -1433,7 +1430,8 @@ static int mailbox_write(struct intel_vgpu *vgpu, unsigned int offset,
 		break;
 	case SKL_PCODE_CDCLK_CONTROL:
 		if (IS_SKYLAKE(vgpu->gvt->dev_priv)
-			|| IS_KABYLAKE(vgpu->gvt->dev_priv))
+			|| IS_KABYLAKE(vgpu->gvt->dev_priv)
+			|| IS_COFFEELAKE(vgpu->gvt->dev_priv))
 			*data0 = SKL_CDCLK_READY_FOR_CHANGE;
 		break;
 	case GEN6_PCODE_READ_RC6VIDS:
@@ -3304,7 +3302,8 @@ int intel_gvt_setup_mmio_info(struct intel_gvt *gvt)
 		if (ret)
 			goto err;
 	} else if (IS_SKYLAKE(dev_priv)
-		|| IS_KABYLAKE(dev_priv)) {
+		|| IS_KABYLAKE(dev_priv)
+		|| IS_COFFEELAKE(dev_priv)) {
 		ret = init_broadwell_mmio_info(gvt);
 		if (ret)
 			goto err;
...
@@ -581,9 +581,7 @@ static void gen8_init_irq(
 		SET_BIT_INFO(irq, 4, PRIMARY_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C);
 		SET_BIT_INFO(irq, 5, SPRITE_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C);
-	} else if (IS_SKYLAKE(gvt->dev_priv)
-			|| IS_KABYLAKE(gvt->dev_priv)
-			|| IS_BROXTON(gvt->dev_priv)) {
+	} else if (INTEL_GEN(gvt->dev_priv) >= 9) {
 		SET_BIT_INFO(irq, 25, AUX_CHANNEL_B, INTEL_GVT_IRQ_INFO_DE_PORT);
 		SET_BIT_INFO(irq, 26, AUX_CHANNEL_C, INTEL_GVT_IRQ_INFO_DE_PORT);
 		SET_BIT_INFO(irq, 27, AUX_CHANNEL_D, INTEL_GVT_IRQ_INFO_DE_PORT);
...
@@ -351,8 +351,7 @@ static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
 	 */
 	fw = intel_uncore_forcewake_for_reg(dev_priv, reg,
 					    FW_REG_READ | FW_REG_WRITE);
-	if (ring_id == RCS && (IS_SKYLAKE(dev_priv) ||
-			IS_KABYLAKE(dev_priv) || IS_BROXTON(dev_priv)))
+	if (ring_id == RCS && (INTEL_GEN(dev_priv) >= 9))
 		fw |= FORCEWAKE_RENDER;
 	intel_uncore_forcewake_get(dev_priv, fw);
@@ -389,7 +388,8 @@ static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next,
 	if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
 		return;
-	if ((IS_KABYLAKE(dev_priv) || IS_BROXTON(dev_priv)) && ring_id == RCS)
+	if ((IS_KABYLAKE(dev_priv) || IS_BROXTON(dev_priv)
+		|| IS_COFFEELAKE(dev_priv)) && ring_id == RCS)
 		return;
 	if (!pre && !gen9_render_mocs.initialized)
@@ -455,9 +455,7 @@ static void switch_mmio(struct intel_vgpu *pre,
 	u32 old_v, new_v;
 	dev_priv = pre ? pre->gvt->dev_priv : next->gvt->dev_priv;
-	if (IS_SKYLAKE(dev_priv)
-		|| IS_KABYLAKE(dev_priv)
-		|| IS_BROXTON(dev_priv))
+	if (INTEL_GEN(dev_priv) >= 9)
 		switch_mocs(pre, next, ring_id);
 	for (mmio = dev_priv->gvt->engine_mmio_list.mmio;
@@ -469,8 +467,8 @@ static void switch_mmio(struct intel_vgpu *pre,
 		 * state image on kabylake, it's initialized by lri command and
 		 * save or restore with context together.
 		 */
-		if ((IS_KABYLAKE(dev_priv) || IS_BROXTON(dev_priv))
-			&& mmio->in_context)
+		if ((IS_KABYLAKE(dev_priv) || IS_BROXTON(dev_priv)
+			|| IS_COFFEELAKE(dev_priv)) && mmio->in_context)
 			continue;
 		// save
@@ -563,9 +561,7 @@ void intel_gvt_init_engine_mmio_context(struct intel_gvt *gvt)
 {
 	struct engine_mmio *mmio;
-	if (IS_SKYLAKE(gvt->dev_priv) ||
-		IS_KABYLAKE(gvt->dev_priv) ||
-		IS_BROXTON(gvt->dev_priv))
+	if (INTEL_GEN(gvt->dev_priv) >= 9)
 		gvt->engine_mmio_list.mmio = gen9_engine_mmio_list;
 	else
 		gvt->engine_mmio_list.mmio = gen8_engine_mmio_list;
...
@@ -299,7 +299,8 @@ static int copy_workload_to_ring_buffer(struct intel_vgpu_workload *workload)
 	void *shadow_ring_buffer_va;
 	u32 *cs;
-	if ((IS_KABYLAKE(req->i915) || IS_BROXTON(req->i915))
+	if ((IS_KABYLAKE(req->i915) || IS_BROXTON(req->i915)
+		|| IS_COFFEELAKE(req->i915))
 		&& is_inhibit_context(req->hw_context))
 		intel_vgpu_restore_inhibit_context(vgpu, req);
@@ -939,9 +940,7 @@ static int workload_thread(void *priv)
 	struct intel_vgpu_workload *workload = NULL;
 	struct intel_vgpu *vgpu = NULL;
 	int ret;
-	bool need_force_wake = IS_SKYLAKE(gvt->dev_priv)
-		|| IS_KABYLAKE(gvt->dev_priv)
-		|| IS_BROXTON(gvt->dev_priv);
+	bool need_force_wake = (INTEL_GEN(gvt->dev_priv) >= 9);
 	DEFINE_WAIT_FUNC(wait, woken_wake_function);
 	kfree(p);
...
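
One detail worth pulling out of the diff: on KBL, BXT, and now CFL, the render ring's MOCS registers and the in-context MMIOs are part of the logical ring context image, initialized by LRI commands and saved/restored with the context, so the switch_mocs() and mmio->in_context hunks above must skip them during the manual vGPU MMIO switch. A compilable sketch of that gate, with hypothetical stand-in types (the real checks are the ones in the diff):

#include <stdbool.h>

/* Hypothetical stand-ins mirroring the switch_mocs()/in_context gates:
 * on KBL/BXT/CFL the hardware context switch saves and restores these
 * registers, so GVT-g must not save/restore them by hand. */
enum plat { PLAT_SKL, PLAT_KBL, PLAT_BXT, PLAT_CFL };
#define RCS 0 /* render command streamer ring id */

static bool mocs_in_context(enum plat p)
{
	return p == PLAT_KBL || p == PLAT_BXT || p == PLAT_CFL;
}

/* Mirrors the switch_mocs() early return: skip the manual MOCS switch
 * on the render ring when the context image already carries it. */
static bool skip_mocs_switch(enum plat p, int ring_id)
{
	return mocs_in_context(p) && ring_id == RCS;
}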