Commit e3476c00 authored by Xu Han, committed by Zhenyu Wang

drm/i915/gvt: Add KBL dispatch logic in each function.

Extend the function dispatch logic to support the KBL platform.
Signed-off-by: Xu Han <xu.han@intel.com>
Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
parent 6f696d13
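
For readers unfamiliar with the pattern being extended: GVT-g dispatches per-platform behaviour through chains of IS_BROADWELL()/IS_SKYLAKE() checks, so Kaby Lake support amounts to adding IS_KABYLAKE() to each chain plus a D_KBL device type. The standalone C sketch below only illustrates that shape; the enum, struct, and macros are simplified stand-ins, not the real i915 definitions.

/* Illustrative sketch only -- simplified stand-ins for the i915/GVT types. */
#include <stdio.h>

enum platform { PLATFORM_BDW, PLATFORM_SKL, PLATFORM_KBL };

struct dev_priv { enum platform platform; };

#define IS_BROADWELL(d) ((d)->platform == PLATFORM_BDW)
#define IS_SKYLAKE(d)   ((d)->platform == PLATFORM_SKL)
#define IS_KABYLAKE(d)  ((d)->platform == PLATFORM_KBL)

/* Device-type flags, mirroring the D_BDW/D_SKL/D_KBL idea. */
#define D_BDW 0x1UL
#define D_SKL 0x2UL
#define D_KBL 0x4UL

/* Same shape as intel_gvt_get_device_type(): one branch per platform. */
static unsigned long get_device_type(const struct dev_priv *d)
{
    if (IS_BROADWELL(d))
        return D_BDW;
    else if (IS_SKYLAKE(d))
        return D_SKL;
    else if (IS_KABYLAKE(d))
        return D_KBL;
    return 0;
}

int main(void)
{
    struct dev_priv kbl = { PLATFORM_KBL };

    /* Most call sites treat KBL exactly like SKL (gen9 paths). */
    if (IS_SKYLAKE(&kbl) || IS_KABYLAKE(&kbl))
        printf("gen9 handlers, device type %#lx\n", get_device_type(&kbl));

    return 0;
}

In the commit below, every such predicate chain across the command parser, display, GTT, MMIO handler, interrupt, render, and scheduler code gets the extra IS_KABYLAKE() term.
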
@@ -1215,7 +1215,7 @@ static int gen8_check_mi_display_flip(struct parser_exec_state *s,
         if (!info->async_flip)
                 return 0;
-        if (IS_SKYLAKE(dev_priv)) {
+        if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
                 stride = vgpu_vreg(s->vgpu, info->stride_reg) & GENMASK(9, 0);
                 tile = (vgpu_vreg(s->vgpu, info->ctrl_reg) &
                                 GENMASK(12, 10)) >> 10;
@@ -1243,7 +1243,7 @@ static int gen8_update_plane_mmio_from_mi_display_flip(
         set_mask_bits(&vgpu_vreg(vgpu, info->surf_reg), GENMASK(31, 12),
                       info->surf_val << 12);
-        if (IS_SKYLAKE(dev_priv)) {
+        if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
                 set_mask_bits(&vgpu_vreg(vgpu, info->stride_reg), GENMASK(9, 0),
                               info->stride_val);
                 set_mask_bits(&vgpu_vreg(vgpu, info->ctrl_reg), GENMASK(12, 10),
@@ -1267,7 +1267,7 @@ static int decode_mi_display_flip(struct parser_exec_state *s,
         if (IS_BROADWELL(dev_priv))
                 return gen8_decode_mi_display_flip(s, info);
-        if (IS_SKYLAKE(dev_priv))
+        if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
                 return skl_decode_mi_display_flip(s, info);
         return -ENODEV;
@@ -1278,7 +1278,9 @@ static int check_mi_display_flip(struct parser_exec_state *s,
 {
         struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
-        if (IS_BROADWELL(dev_priv) || IS_SKYLAKE(dev_priv))
+        if (IS_BROADWELL(dev_priv)
+                || IS_SKYLAKE(dev_priv)
+                || IS_KABYLAKE(dev_priv))
                 return gen8_check_mi_display_flip(s, info);
         return -ENODEV;
 }
@@ -1289,7 +1291,9 @@ static int update_plane_mmio_from_mi_display_flip(
 {
         struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
-        if (IS_BROADWELL(dev_priv) || IS_SKYLAKE(dev_priv))
+        if (IS_BROADWELL(dev_priv)
+                || IS_SKYLAKE(dev_priv)
+                || IS_KABYLAKE(dev_priv))
                 return gen8_update_plane_mmio_from_mi_display_flip(s, info);
         return -ENODEV;
 }
@@ -1569,7 +1573,8 @@ static int batch_buffer_needs_scan(struct parser_exec_state *s)
 {
         struct intel_gvt *gvt = s->vgpu->gvt;
-        if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)) {
+        if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)
+                || IS_KABYLAKE(gvt->dev_priv)) {
                 /* BDW decides privilege based on address space */
                 if (cmd_val(s, 0) & (1 << 8))
                         return 0;
...
@@ -173,7 +173,7 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
                         SDE_PORTC_HOTPLUG_CPT |
                         SDE_PORTD_HOTPLUG_CPT);
-        if (IS_SKYLAKE(dev_priv)) {
+        if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
                 vgpu_vreg(vgpu, SDEISR) &= ~(SDE_PORTA_HOTPLUG_SPT |
                                 SDE_PORTE_HOTPLUG_SPT);
                 vgpu_vreg(vgpu, SKL_FUSE_STATUS) |=
@@ -203,7 +203,7 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
                 vgpu_vreg(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDID_DETECTED;
         }
-        if (IS_SKYLAKE(dev_priv) &&
+        if ((IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) &&
                         intel_vgpu_has_monitor_on_port(vgpu, PORT_E)) {
                 vgpu_vreg(vgpu, SDEISR) |= SDE_PORTE_HOTPLUG_SPT;
         }
@@ -365,7 +365,7 @@ void intel_vgpu_clean_display(struct intel_vgpu *vgpu)
 {
         struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
-        if (IS_SKYLAKE(dev_priv))
+        if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
                 clean_virtual_dp_monitor(vgpu, PORT_D);
         else
                 clean_virtual_dp_monitor(vgpu, PORT_B);
@@ -387,7 +387,7 @@ int intel_vgpu_init_display(struct intel_vgpu *vgpu, u64 resolution)
         intel_vgpu_init_i2c_edid(vgpu);
-        if (IS_SKYLAKE(dev_priv))
+        if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
                 return setup_virtual_dp_monitor(vgpu, PORT_D, GVT_DP_D,
                                                 resolution);
         else
...
@@ -2220,7 +2220,8 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
         gvt_dbg_core("init gtt\n");
-        if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)) {
+        if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)
+                || IS_KABYLAKE(gvt->dev_priv)) {
                 gvt->gtt.pte_ops = &gen8_gtt_pte_ops;
                 gvt->gtt.gma_ops = &gen8_gtt_gma_ops;
                 gvt->gtt.mm_alloc_page_table = gen8_mm_alloc_page_table;
...
@@ -106,7 +106,8 @@ static void init_device_info(struct intel_gvt *gvt)
         struct intel_gvt_device_info *info = &gvt->device_info;
         struct pci_dev *pdev = gvt->dev_priv->drm.pdev;
-        if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)) {
+        if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)
+                || IS_KABYLAKE(gvt->dev_priv)) {
                 info->max_support_vgpus = 8;
                 info->cfg_space_size = 256;
                 info->mmio_size = 2 * 1024 * 1024;
...
@@ -68,6 +68,8 @@ unsigned long intel_gvt_get_device_type(struct intel_gvt *gvt)
                 return D_BDW;
         else if (IS_SKYLAKE(gvt->dev_priv))
                 return D_SKL;
+        else if (IS_KABYLAKE(gvt->dev_priv))
+                return D_KBL;
         return 0;
 }
@@ -234,7 +236,8 @@ static int mul_force_wake_write(struct intel_vgpu *vgpu,
         old = vgpu_vreg(vgpu, offset);
         new = CALC_MODE_MASK_REG(old, *(u32 *)p_data);
-        if (IS_SKYLAKE(vgpu->gvt->dev_priv)) {
+        if (IS_SKYLAKE(vgpu->gvt->dev_priv)
+                || IS_KABYLAKE(vgpu->gvt->dev_priv)) {
                 switch (offset) {
                 case FORCEWAKE_RENDER_GEN9_REG:
                         ack_reg_offset = FORCEWAKE_ACK_RENDER_GEN9_REG;
@@ -823,8 +826,9 @@ static int dp_aux_ch_ctl_mmio_write(struct intel_vgpu *vgpu,
         write_vreg(vgpu, offset, p_data, bytes);
         data = vgpu_vreg(vgpu, offset);
-        if (IS_SKYLAKE(vgpu->gvt->dev_priv) &&
-            offset != _REG_SKL_DP_AUX_CH_CTL(port_index)) {
+        if ((IS_SKYLAKE(vgpu->gvt->dev_priv)
+                || IS_KABYLAKE(vgpu->gvt->dev_priv))
+                && offset != _REG_SKL_DP_AUX_CH_CTL(port_index)) {
                 /* SKL DPB/C/D aux ctl register changed */
                 return 0;
         } else if (IS_BROADWELL(vgpu->gvt->dev_priv) &&
@@ -1303,7 +1307,8 @@ static int mailbox_write(struct intel_vgpu *vgpu, unsigned int offset,
         switch (cmd) {
         case GEN9_PCODE_READ_MEM_LATENCY:
-                if (IS_SKYLAKE(vgpu->gvt->dev_priv)) {
+                if (IS_SKYLAKE(vgpu->gvt->dev_priv)
+                        || IS_KABYLAKE(vgpu->gvt->dev_priv)) {
                         /**
                          * "Read memory latency" command on gen9.
                          * Below memory latency values are read
@@ -1316,7 +1321,8 @@ static int mailbox_write(struct intel_vgpu *vgpu, unsigned int offset,
                 }
                 break;
         case SKL_PCODE_CDCLK_CONTROL:
-                if (IS_SKYLAKE(vgpu->gvt->dev_priv))
+                if (IS_SKYLAKE(vgpu->gvt->dev_priv)
+                        || IS_KABYLAKE(vgpu->gvt->dev_priv))
                         *data0 = SKL_CDCLK_READY_FOR_CHANGE;
                 break;
         case GEN6_PCODE_READ_RC6VIDS:
@@ -2886,7 +2892,8 @@ int intel_gvt_setup_mmio_info(struct intel_gvt *gvt)
                 ret = init_broadwell_mmio_info(gvt);
                 if (ret)
                         goto err;
-        } else if (IS_SKYLAKE(dev_priv)) {
+        } else if (IS_SKYLAKE(dev_priv)
+                || IS_KABYLAKE(dev_priv)) {
                 ret = init_broadwell_mmio_info(gvt);
                 if (ret)
                         goto err;
...
@@ -580,7 +580,7 @@ static void gen8_init_irq(
                 SET_BIT_INFO(irq, 4, PRIMARY_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C);
                 SET_BIT_INFO(irq, 5, SPRITE_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C);
-        } else if (IS_SKYLAKE(gvt->dev_priv)) {
+        } else if (IS_SKYLAKE(gvt->dev_priv) || IS_KABYLAKE(gvt->dev_priv)) {
                 SET_BIT_INFO(irq, 25, AUX_CHANNEL_B, INTEL_GVT_IRQ_INFO_DE_PORT);
                 SET_BIT_INFO(irq, 26, AUX_CHANNEL_C, INTEL_GVT_IRQ_INFO_DE_PORT);
                 SET_BIT_INFO(irq, 27, AUX_CHANNEL_D, INTEL_GVT_IRQ_INFO_DE_PORT);
@@ -690,7 +690,8 @@ int intel_gvt_init_irq(struct intel_gvt *gvt)
         gvt_dbg_core("init irq framework\n");
-        if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)) {
+        if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)
+                || IS_KABYLAKE(gvt->dev_priv)) {
                 irq->ops = &gen8_irq_ops;
                 irq->irq_map = gen8_irq_map;
         } else {
...
@@ -171,7 +171,7 @@ static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
          */
         fw = intel_uncore_forcewake_for_reg(dev_priv, reg,
                                 FW_REG_READ | FW_REG_WRITE);
-        if (ring_id == RCS && IS_SKYLAKE(dev_priv))
+        if (ring_id == RCS && (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)))
                 fw |= FORCEWAKE_RENDER;
         intel_uncore_forcewake_get(dev_priv, fw);
@@ -204,7 +204,7 @@ static void load_mocs(struct intel_vgpu *vgpu, int ring_id)
         if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
                 return;
-        if (!IS_SKYLAKE(dev_priv))
+        if (!(IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)))
                 return;
         offset.reg = regs[ring_id];
@@ -242,7 +242,7 @@ static void restore_mocs(struct intel_vgpu *vgpu, int ring_id)
         if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
                 return;
-        if (!IS_SKYLAKE(dev_priv))
+        if (!(IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)))
                 return;
         offset.reg = regs[ring_id];
@@ -277,7 +277,8 @@ void intel_gvt_load_render_mmio(struct intel_vgpu *vgpu, int ring_id)
         u32 inhibit_mask =
                 _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
-        if (IS_SKYLAKE(vgpu->gvt->dev_priv)) {
+        if (IS_SKYLAKE(vgpu->gvt->dev_priv)
+                || IS_KABYLAKE(vgpu->gvt->dev_priv)) {
                 mmio = gen9_render_mmio_list;
                 array_size = ARRAY_SIZE(gen9_render_mmio_list);
                 load_mocs(vgpu, ring_id);
@@ -324,7 +325,7 @@ void intel_gvt_restore_render_mmio(struct intel_vgpu *vgpu, int ring_id)
         u32 v;
         int i, array_size;
-        if (IS_SKYLAKE(dev_priv)) {
+        if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
                 mmio = gen9_render_mmio_list;
                 array_size = ARRAY_SIZE(gen9_render_mmio_list);
                 restore_mocs(vgpu, ring_id);
...
@@ -448,7 +448,8 @@ static int workload_thread(void *priv)
         struct intel_vgpu_workload *workload = NULL;
         struct intel_vgpu *vgpu = NULL;
         int ret;
-        bool need_force_wake = IS_SKYLAKE(gvt->dev_priv);
+        bool need_force_wake = IS_SKYLAKE(gvt->dev_priv)
+                || IS_KABYLAKE(gvt->dev_priv);
         DEFINE_WAIT_FUNC(wait, woken_wake_function);
         kfree(p);
...