Commit 667d11dc authored by Dave Airlie

Merge tag 'drm-intel-fixes-2021-01-14' of git://anongit.freedesktop.org/drm/drm-intel into drm-fixes

drm/i915 fixes for v5.11-rc4:
- Allow the sysadmin to override security mitigations
- Restore clear-residual mitigations for ivb/byt
- Limit VFE threads based on GT
- GVT: fix vfio edid and full display detection
- Fix DSI DSC power refcounting
- Fix LPT CPU mode backlight takeover
- Disable RPM wakeref assertions during driver shutdown
- Fix DSI sequence sleeps
Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/87sg73pz42.fsf@intel.com
parents 61502e3b 984cadea
@@ -38,6 +38,7 @@ i915-y += i915_drv.o \
	  i915_config.o \
	  i915_irq.o \
	  i915_getparam.o \
+	  i915_mitigations.o \
	  i915_params.o \
	  i915_pci.o \
	  i915_scatterlist.o \
......
@@ -1616,10 +1616,6 @@ static void gen11_dsi_get_power_domains(struct intel_encoder *encoder,
 	get_dsi_io_power_domains(i915,
 				 enc_to_intel_dsi(encoder));
-
-	if (crtc_state->dsc.compression_enable)
-		intel_display_power_get(i915,
-					intel_dsc_power_domain(crtc_state));
 }
 
 static bool gen11_dsi_get_hw_state(struct intel_encoder *encoder,
......
@@ -1650,16 +1650,13 @@ static int lpt_setup_backlight(struct intel_connector *connector, enum pipe unus
 		val = pch_get_backlight(connector);
 	else
 		val = lpt_get_backlight(connector);
-	val = intel_panel_compute_brightness(connector, val);
-	panel->backlight.level = clamp(val, panel->backlight.min,
-				       panel->backlight.max);
 
 	if (cpu_mode) {
 		drm_dbg_kms(&dev_priv->drm,
 			    "CPU backlight register was enabled, switching to PCH override\n");
 
 		/* Write converted CPU PWM value to PCH override register */
-		lpt_set_backlight(connector->base.state, panel->backlight.level);
+		lpt_set_backlight(connector->base.state, val);
 		intel_de_write(dev_priv, BLC_PWM_PCH_CTL1,
 			       pch_ctl1 | BLM_PCH_OVERRIDE_ENABLE);
@@ -1667,6 +1664,10 @@ static int lpt_setup_backlight(struct intel_connector *connector, enum pipe unus
 			       cpu_ctl2 & ~BLM_PWM_ENABLE);
 	}
 
+	val = intel_panel_compute_brightness(connector, val);
+	panel->backlight.level = clamp(val, panel->backlight.min,
+				       panel->backlight.max);
+
 	return 0;
 }
......
@@ -812,10 +812,20 @@ static void intel_dsi_pre_enable(struct intel_atomic_state *state,
 	intel_dsi_prepare(encoder, pipe_config);
 
 	intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_POWER_ON);
-	intel_dsi_msleep(intel_dsi, intel_dsi->panel_on_delay);
 
-	/* Deassert reset */
-	intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DEASSERT_RESET);
+	/*
+	 * Give the panel time to power-on and then deassert its reset.
+	 * Depending on the VBT MIPI sequences version the deassert-seq
+	 * may contain the necessary delay, intel_dsi_msleep() will skip
+	 * the delay in that case. If there is no deassert-seq, then an
+	 * unconditional msleep is used to give the panel time to power-on.
+	 */
+	if (dev_priv->vbt.dsi.sequence[MIPI_SEQ_DEASSERT_RESET]) {
+		intel_dsi_msleep(intel_dsi, intel_dsi->panel_on_delay);
+		intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DEASSERT_RESET);
+	} else {
+		msleep(intel_dsi->panel_on_delay);
+	}
 
 	if (IS_GEMINILAKE(dev_priv)) {
 		glk_cold_boot = glk_dsi_enable_io(encoder);
......
@@ -7,8 +7,6 @@
 #include "i915_drv.h"
 #include "intel_gpu_commands.h"
 
-#define MAX_URB_ENTRIES 64
-#define STATE_SIZE (4 * 1024)
 #define GT3_INLINE_DATA_DELAYS 0x1E00
 #define batch_advance(Y, CS) GEM_BUG_ON((Y)->end != (CS))
@@ -34,38 +32,59 @@ struct batch_chunk {
 };
 
 struct batch_vals {
-	u32 max_primitives;
-	u32 max_urb_entries;
-	u32 cmd_size;
-	u32 state_size;
+	u32 max_threads;
 	u32 state_start;
-	u32 batch_size;
+	u32 surface_start;
 	u32 surface_height;
 	u32 surface_width;
-	u32 scratch_size;
-	u32 max_size;
+	u32 size;
 };
 
+static inline int num_primitives(const struct batch_vals *bv)
+{
+	/*
+	 * We need to saturate the GPU with work in order to dispatch
+	 * a shader on every HW thread, and clear the thread-local registers.
+	 * In short, we have to dispatch work faster than the shaders can
+	 * run in order to fill the EU and occupy each HW thread.
+	 */
+	return bv->max_threads;
+}
+
 static void
 batch_get_defaults(struct drm_i915_private *i915, struct batch_vals *bv)
 {
 	if (IS_HASWELL(i915)) {
-		bv->max_primitives = 280;
-		bv->max_urb_entries = MAX_URB_ENTRIES;
+		switch (INTEL_INFO(i915)->gt) {
+		default:
+		case 1:
+			bv->max_threads = 70;
+			break;
+		case 2:
+			bv->max_threads = 140;
+			break;
+		case 3:
+			bv->max_threads = 280;
+			break;
+		}
 		bv->surface_height = 16 * 16;
 		bv->surface_width = 32 * 2 * 16;
 	} else {
-		bv->max_primitives = 128;
-		bv->max_urb_entries = MAX_URB_ENTRIES / 2;
+		switch (INTEL_INFO(i915)->gt) {
+		default:
+		case 1: /* including vlv */
+			bv->max_threads = 36;
+			break;
+		case 2:
+			bv->max_threads = 128;
+			break;
+		}
 		bv->surface_height = 16 * 8;
 		bv->surface_width = 32 * 16;
 	}
-	bv->cmd_size = bv->max_primitives * 4096;
-	bv->state_size = STATE_SIZE;
-	bv->state_start = bv->cmd_size;
-	bv->batch_size = bv->cmd_size + bv->state_size;
-	bv->scratch_size = bv->surface_height * bv->surface_width;
-	bv->max_size = bv->batch_size + bv->scratch_size;
+
+	bv->state_start = round_up(SZ_1K + num_primitives(bv) * 64, SZ_4K);
+	bv->surface_start = bv->state_start + SZ_4K;
+	bv->size = bv->surface_start + bv->surface_height * bv->surface_width;
 }
 
 static void batch_init(struct batch_chunk *bc,
@@ -155,7 +174,8 @@ static u32
 gen7_fill_binding_table(struct batch_chunk *state,
 			const struct batch_vals *bv)
 {
-	u32 surface_start = gen7_fill_surface_state(state, bv->batch_size, bv);
+	u32 surface_start =
+		gen7_fill_surface_state(state, bv->surface_start, bv);
 	u32 *cs = batch_alloc_items(state, 32, 8);
 	u32 offset = batch_offset(state, cs);
@@ -214,9 +234,9 @@ static void
 gen7_emit_state_base_address(struct batch_chunk *batch,
 			     u32 surface_state_base)
 {
-	u32 *cs = batch_alloc_items(batch, 0, 12);
+	u32 *cs = batch_alloc_items(batch, 0, 10);
 
-	*cs++ = STATE_BASE_ADDRESS | (12 - 2);
+	*cs++ = STATE_BASE_ADDRESS | (10 - 2);
 	/* general */
 	*cs++ = batch_addr(batch) | BASE_ADDRESS_MODIFY;
 	/* surface */
@@ -233,8 +253,6 @@ gen7_emit_state_base_address(struct batch_chunk *batch,
 	*cs++ = BASE_ADDRESS_MODIFY;
 	*cs++ = 0;
 	*cs++ = BASE_ADDRESS_MODIFY;
-	*cs++ = 0;
-	*cs++ = 0;
 
 	batch_advance(batch, cs);
 }
@@ -244,8 +262,7 @@ gen7_emit_vfe_state(struct batch_chunk *batch,
 		    u32 urb_size, u32 curbe_size,
 		    u32 mode)
 {
-	u32 urb_entries = bv->max_urb_entries;
-	u32 threads = bv->max_primitives - 1;
+	u32 threads = bv->max_threads - 1;
 	u32 *cs = batch_alloc_items(batch, 32, 8);
 
 	*cs++ = MEDIA_VFE_STATE | (8 - 2);
@@ -254,7 +271,7 @@ gen7_emit_vfe_state(struct batch_chunk *batch,
 	*cs++ = 0;
 
 	/* number of threads & urb entries for GPGPU vs Media Mode */
-	*cs++ = threads << 16 | urb_entries << 8 | mode << 2;
+	*cs++ = threads << 16 | 1 << 8 | mode << 2;
 
 	*cs++ = 0;
@@ -293,17 +310,12 @@ gen7_emit_media_object(struct batch_chunk *batch,
 {
 	unsigned int x_offset = (media_object_index % 16) * 64;
 	unsigned int y_offset = (media_object_index / 16) * 16;
-	unsigned int inline_data_size;
-	unsigned int media_batch_size;
-	unsigned int i;
+	unsigned int pkt = 6 + 3;
 	u32 *cs;
 
-	inline_data_size = 112 * 8;
-	media_batch_size = inline_data_size + 6;
-
-	cs = batch_alloc_items(batch, 8, media_batch_size);
-	*cs++ = MEDIA_OBJECT | (media_batch_size - 2);
+	cs = batch_alloc_items(batch, 8, pkt);
+
+	*cs++ = MEDIA_OBJECT | (pkt - 2);
 
 	/* interface descriptor offset */
 	*cs++ = 0;
@@ -317,25 +329,44 @@ gen7_emit_media_object(struct batch_chunk *batch,
 	*cs++ = 0;
 
 	/* inline */
-	*cs++ = (y_offset << 16) | (x_offset);
+	*cs++ = y_offset << 16 | x_offset;
 	*cs++ = 0;
 	*cs++ = GT3_INLINE_DATA_DELAYS;
-	for (i = 3; i < inline_data_size; i++)
-		*cs++ = 0;
 
 	batch_advance(batch, cs);
 }
 
 static void gen7_emit_pipeline_flush(struct batch_chunk *batch)
 {
-	u32 *cs = batch_alloc_items(batch, 0, 5);
+	u32 *cs = batch_alloc_items(batch, 0, 4);
 
-	*cs++ = GFX_OP_PIPE_CONTROL(5);
-	*cs++ = PIPE_CONTROL_STATE_CACHE_INVALIDATE |
-		PIPE_CONTROL_GLOBAL_GTT_IVB;
+	*cs++ = GFX_OP_PIPE_CONTROL(4);
+	*cs++ = PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
+		PIPE_CONTROL_DEPTH_CACHE_FLUSH |
+		PIPE_CONTROL_DC_FLUSH_ENABLE |
+		PIPE_CONTROL_CS_STALL;
 	*cs++ = 0;
 	*cs++ = 0;
+
+	batch_advance(batch, cs);
+}
+
+static void gen7_emit_pipeline_invalidate(struct batch_chunk *batch)
+{
+	u32 *cs = batch_alloc_items(batch, 0, 8);
+
+	/* ivb: Stall before STATE_CACHE_INVALIDATE */
+	*cs++ = GFX_OP_PIPE_CONTROL(4);
+	*cs++ = PIPE_CONTROL_STALL_AT_SCOREBOARD |
+		PIPE_CONTROL_CS_STALL;
+	*cs++ = 0;
+	*cs++ = 0;
+
+	*cs++ = GFX_OP_PIPE_CONTROL(4);
+	*cs++ = PIPE_CONTROL_STATE_CACHE_INVALIDATE;
 	*cs++ = 0;
+	*cs++ = 0;
 
 	batch_advance(batch, cs);
 }
@@ -344,34 +375,34 @@ static void emit_batch(struct i915_vma * const vma,
 			const struct batch_vals *bv)
 {
 	struct drm_i915_private *i915 = vma->vm->i915;
-	unsigned int desc_count = 64;
-	const u32 urb_size = 112;
+	const unsigned int desc_count = 1;
+	const unsigned int urb_size = 1;
 	struct batch_chunk cmds, state;
-	u32 interface_descriptor;
+	u32 descriptors;
 	unsigned int i;
 
-	batch_init(&cmds, vma, start, 0, bv->cmd_size);
-	batch_init(&state, vma, start, bv->state_start, bv->state_size);
+	batch_init(&cmds, vma, start, 0, bv->state_start);
+	batch_init(&state, vma, start, bv->state_start, SZ_4K);
 
-	interface_descriptor =
-		gen7_fill_interface_descriptor(&state, bv,
-					       IS_HASWELL(i915) ?
-					       &cb_kernel_hsw :
-					       &cb_kernel_ivb,
-					       desc_count);
-	gen7_emit_pipeline_flush(&cmds);
+	descriptors = gen7_fill_interface_descriptor(&state, bv,
						     IS_HASWELL(i915) ?
						     &cb_kernel_hsw :
						     &cb_kernel_ivb,
						     desc_count);
+
+	gen7_emit_pipeline_invalidate(&cmds);
 	batch_add(&cmds, PIPELINE_SELECT | PIPELINE_SELECT_MEDIA);
 	batch_add(&cmds, MI_NOOP);
-	gen7_emit_state_base_address(&cmds, interface_descriptor);
+	gen7_emit_pipeline_invalidate(&cmds);
+
 	gen7_emit_pipeline_flush(&cmds);
+	gen7_emit_state_base_address(&cmds, descriptors);
+	gen7_emit_pipeline_invalidate(&cmds);
 
 	gen7_emit_vfe_state(&cmds, bv, urb_size - 1, 0, 0);
+	gen7_emit_interface_descriptor_load(&cmds, descriptors, desc_count);
 
-	gen7_emit_interface_descriptor_load(&cmds,
-					    interface_descriptor,
-					    desc_count);
-
-	for (i = 0; i < bv->max_primitives; i++)
+	for (i = 0; i < num_primitives(bv); i++)
 		gen7_emit_media_object(&cmds, i);
 
 	batch_add(&cmds, MI_BATCH_BUFFER_END);
@@ -385,15 +416,15 @@ int gen7_setup_clear_gpr_bb(struct intel_engine_cs * const engine,
 	batch_get_defaults(engine->i915, &bv);
 	if (!vma)
-		return bv.max_size;
+		return bv.size;
 
-	GEM_BUG_ON(vma->obj->base.size < bv.max_size);
+	GEM_BUG_ON(vma->obj->base.size < bv.size);
 
 	batch = i915_gem_object_pin_map(vma->obj, I915_MAP_WC);
 	if (IS_ERR(batch))
 		return PTR_ERR(batch);
 
-	emit_batch(vma, memset(batch, 0, bv.max_size), &bv);
+	emit_batch(vma, memset(batch, 0, bv.size), &bv);
 
 	i915_gem_object_flush_map(vma->obj);
 	__i915_gem_object_release_map(vma->obj);
......
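As an illustration (not part of the patch above): plugging the Haswell GT2 value from the switch in batch_get_defaults() into the new layout arithmetic gives a feel for the buffer that gen7_setup_clear_gpr_bb() now requests. The sketch below is a hypothetical standalone userspace program; round_up() is re-implemented locally and the GT2 numbers are assumptions lifted from the hunk, not new driver code.

/* Illustration only: mirror the new batch_get_defaults() for a Haswell GT2. */
#include <stdio.h>

#define SZ_1K 1024u
#define SZ_4K 4096u
#define round_up(x, a) ((((x) + (a) - 1) / (a)) * (a))

int main(void)
{
	unsigned int max_threads = 140;		/* hsw GT2 case above */
	unsigned int surface_height = 16 * 16;
	unsigned int surface_width = 32 * 2 * 16;

	unsigned int state_start = round_up(SZ_1K + max_threads * 64, SZ_4K);
	unsigned int surface_start = state_start + SZ_4K;
	unsigned int size = surface_start + surface_height * surface_width;

	/* Prints: state_start=12288 surface_start=16384 size=278528 */
	printf("state_start=%u surface_start=%u size=%u\n",
	       state_start, surface_start, size);
	return 0;
}

So for GT2 the commands occupy the first 12 KiB, the interface-descriptor state the next 4 KiB, and the clear surface the remaining 256 KiB; for Haswell GT3 (280 threads) the same arithmetic yields state_start = 20 KiB and a total of 280 KiB.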
@@ -32,6 +32,7 @@
 #include "gen6_ppgtt.h"
 #include "gen7_renderclear.h"
 #include "i915_drv.h"
+#include "i915_mitigations.h"
 #include "intel_breadcrumbs.h"
 #include "intel_context.h"
 #include "intel_gt.h"
@@ -886,7 +887,8 @@ static int switch_context(struct i915_request *rq)
 	GEM_BUG_ON(HAS_EXECLISTS(engine->i915));
 
 	if (engine->wa_ctx.vma && ce != engine->kernel_context) {
-		if (engine->wa_ctx.vma->private != ce) {
+		if (engine->wa_ctx.vma->private != ce &&
+		    i915_mitigate_clear_residuals()) {
 			ret = clear_residuals(rq);
 			if (ret)
 				return ret;
@@ -1290,7 +1292,7 @@ int intel_ring_submission_setup(struct intel_engine_cs *engine)
 	GEM_BUG_ON(timeline->hwsp_ggtt != engine->status_page.vma);
 
-	if (IS_HASWELL(engine->i915) && engine->class == RENDER_CLASS) {
+	if (IS_GEN(engine->i915, 7) && engine->class == RENDER_CLASS) {
 		err = gen7_ctx_switch_bb_init(engine);
 		if (err)
 			goto err_ring_unpin;
......
@@ -217,6 +217,15 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
 			       DDI_BUF_CTL_ENABLE);
 		vgpu_vreg_t(vgpu, DDI_BUF_CTL(port)) |= DDI_BUF_IS_IDLE;
 	}
+	vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) &=
+		~(PORTA_HOTPLUG_ENABLE | PORTA_HOTPLUG_STATUS_MASK);
+	vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) &=
+		~(PORTB_HOTPLUG_ENABLE | PORTB_HOTPLUG_STATUS_MASK);
+	vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) &=
+		~(PORTC_HOTPLUG_ENABLE | PORTC_HOTPLUG_STATUS_MASK);
+	/* No hpd_invert set in vgpu vbt, need to clear invert mask */
+	vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) &= ~BXT_DDI_HPD_INVERT_MASK;
+	vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) &= ~BXT_DE_PORT_HOTPLUG_MASK;
 
 	vgpu_vreg_t(vgpu, BXT_P_CR_GT_DISP_PWRON) &= ~(BIT(0) | BIT(1));
 	vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY0)) &=
@@ -273,6 +282,8 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
 		vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_EDP)) |=
 			(TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
 			 TRANS_DDI_FUNC_ENABLE);
+		vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) |=
+			PORTA_HOTPLUG_ENABLE;
 		vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |=
 			GEN8_DE_PORT_HOTPLUG(HPD_PORT_A);
 	}
@@ -301,6 +312,8 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
 			(TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
 			 (PORT_B << TRANS_DDI_PORT_SHIFT) |
 			 TRANS_DDI_FUNC_ENABLE);
+		vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) |=
+			PORTB_HOTPLUG_ENABLE;
 		vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |=
 			GEN8_DE_PORT_HOTPLUG(HPD_PORT_B);
 	}
@@ -329,6 +342,8 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
 			(TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
 			 (PORT_B << TRANS_DDI_PORT_SHIFT) |
 			 TRANS_DDI_FUNC_ENABLE);
+		vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) |=
+			PORTC_HOTPLUG_ENABLE;
 		vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |=
 			GEN8_DE_PORT_HOTPLUG(HPD_PORT_C);
 	}
@@ -661,44 +676,62 @@ void intel_vgpu_emulate_hotplug(struct intel_vgpu *vgpu, bool connected)
 				PORTD_HOTPLUG_STATUS_MASK;
 		intel_vgpu_trigger_virtual_event(vgpu, DP_D_HOTPLUG);
 	} else if (IS_BROXTON(i915)) {
-		if (connected) {
-			if (intel_vgpu_has_monitor_on_port(vgpu, PORT_A)) {
-				vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |=
-					GEN8_DE_PORT_HOTPLUG(HPD_PORT_A);
-			}
-			if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B)) {
-				vgpu_vreg_t(vgpu, SFUSE_STRAP) |=
-					SFUSE_STRAP_DDIB_DETECTED;
-				vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |=
-					GEN8_DE_PORT_HOTPLUG(HPD_PORT_B);
-			}
-			if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C)) {
-				vgpu_vreg_t(vgpu, SFUSE_STRAP) |=
-					SFUSE_STRAP_DDIC_DETECTED;
-				vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |=
-					GEN8_DE_PORT_HOTPLUG(HPD_PORT_C);
-			}
-		} else {
-			if (intel_vgpu_has_monitor_on_port(vgpu, PORT_A)) {
-				vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) &=
-					~GEN8_DE_PORT_HOTPLUG(HPD_PORT_A);
-			}
-			if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B)) {
-				vgpu_vreg_t(vgpu, SFUSE_STRAP) &=
-					~SFUSE_STRAP_DDIB_DETECTED;
-				vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) &=
-					~GEN8_DE_PORT_HOTPLUG(HPD_PORT_B);
-			}
-			if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C)) {
-				vgpu_vreg_t(vgpu, SFUSE_STRAP) &=
-					~SFUSE_STRAP_DDIC_DETECTED;
-				vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) &=
-					~GEN8_DE_PORT_HOTPLUG(HPD_PORT_C);
-			}
+		if (intel_vgpu_has_monitor_on_port(vgpu, PORT_A)) {
+			if (connected) {
+				vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |=
+					GEN8_DE_PORT_HOTPLUG(HPD_PORT_A);
+			} else {
+				vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) &=
+					~GEN8_DE_PORT_HOTPLUG(HPD_PORT_A);
+			}
+			vgpu_vreg_t(vgpu, GEN8_DE_PORT_IIR) |=
+				GEN8_DE_PORT_HOTPLUG(HPD_PORT_A);
+			vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) &=
+				~PORTA_HOTPLUG_STATUS_MASK;
+			vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) |=
+				PORTA_HOTPLUG_LONG_DETECT;
+			intel_vgpu_trigger_virtual_event(vgpu, DP_A_HOTPLUG);
+		}
+		if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B)) {
+			if (connected) {
+				vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |=
+					GEN8_DE_PORT_HOTPLUG(HPD_PORT_B);
+				vgpu_vreg_t(vgpu, SFUSE_STRAP) |=
+					SFUSE_STRAP_DDIB_DETECTED;
+			} else {
+				vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) &=
+					~GEN8_DE_PORT_HOTPLUG(HPD_PORT_B);
+				vgpu_vreg_t(vgpu, SFUSE_STRAP) &=
+					~SFUSE_STRAP_DDIB_DETECTED;
+			}
+			vgpu_vreg_t(vgpu, GEN8_DE_PORT_IIR) |=
+				GEN8_DE_PORT_HOTPLUG(HPD_PORT_B);
+			vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) &=
+				~PORTB_HOTPLUG_STATUS_MASK;
+			vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) |=
+				PORTB_HOTPLUG_LONG_DETECT;
+			intel_vgpu_trigger_virtual_event(vgpu, DP_B_HOTPLUG);
+		}
+		if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C)) {
+			if (connected) {
+				vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |=
+					GEN8_DE_PORT_HOTPLUG(HPD_PORT_C);
+				vgpu_vreg_t(vgpu, SFUSE_STRAP) |=
+					SFUSE_STRAP_DDIC_DETECTED;
+			} else {
+				vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) &=
+					~GEN8_DE_PORT_HOTPLUG(HPD_PORT_C);
+				vgpu_vreg_t(vgpu, SFUSE_STRAP) &=
+					~SFUSE_STRAP_DDIC_DETECTED;
+			}
+			vgpu_vreg_t(vgpu, GEN8_DE_PORT_IIR) |=
+				GEN8_DE_PORT_HOTPLUG(HPD_PORT_C);
+			vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) &=
+				~PORTC_HOTPLUG_STATUS_MASK;
+			vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) |=
+				PORTC_HOTPLUG_LONG_DETECT;
+			intel_vgpu_trigger_virtual_event(vgpu, DP_C_HOTPLUG);
 		}
-		vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) |=
-			PORTB_HOTPLUG_STATUS_MASK;
-		intel_vgpu_trigger_virtual_event(vgpu, DP_B_HOTPLUG);
 	}
 }
......
@@ -437,10 +437,9 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
 	if (ret)
 		goto out_clean_sched_policy;
 
-	if (IS_BROADWELL(dev_priv))
+	if (IS_BROADWELL(dev_priv) || IS_BROXTON(dev_priv))
 		ret = intel_gvt_hypervisor_set_edid(vgpu, PORT_B);
-	/* FixMe: Re-enable APL/BXT once vfio_edid enabled */
-	else if (!IS_BROXTON(dev_priv))
+	else
 		ret = intel_gvt_hypervisor_set_edid(vgpu, PORT_D);
 
 	if (ret)
 		goto out_clean_sched_policy;
......
@@ -1047,6 +1047,8 @@ static void intel_shutdown_encoders(struct drm_i915_private *dev_priv)
 
 void i915_driver_shutdown(struct drm_i915_private *i915)
 {
+	disable_rpm_wakeref_asserts(&i915->runtime_pm);
+
 	i915_gem_suspend(i915);
 
 	drm_kms_helper_poll_disable(&i915->drm);
@@ -1060,6 +1062,8 @@ void i915_driver_shutdown(struct drm_i915_private *i915)
 
 	intel_suspend_encoders(i915);
 	intel_shutdown_encoders(i915);
+
+	enable_rpm_wakeref_asserts(&i915->runtime_pm);
 }
 
 static bool suspend_to_idle(struct drm_i915_private *dev_priv)
......
// SPDX-License-Identifier: MIT
/*
* Copyright © 2021 Intel Corporation
*/
#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/string.h>
#include "i915_drv.h"
#include "i915_mitigations.h"
static unsigned long mitigations __read_mostly = ~0UL;
enum {
CLEAR_RESIDUALS = 0,
};
static const char * const names[] = {
[CLEAR_RESIDUALS] = "residuals",
};
bool i915_mitigate_clear_residuals(void)
{
return READ_ONCE(mitigations) & BIT(CLEAR_RESIDUALS);
}
static int mitigations_set(const char *val, const struct kernel_param *kp)
{
unsigned long new = ~0UL;
char *str, *sep, *tok;
bool first = true;
int err = 0;
BUILD_BUG_ON(ARRAY_SIZE(names) >= BITS_PER_TYPE(mitigations));
str = kstrdup(val, GFP_KERNEL);
if (!str)
return -ENOMEM;
for (sep = str; (tok = strsep(&sep, ","));) {
bool enable = true;
int i;
/* Be tolerant of leading/trailing whitespace */
tok = strim(tok);
if (first) {
first = false;
if (!strcmp(tok, "auto"))
continue;
new = 0;
if (!strcmp(tok, "off"))
continue;
}
if (*tok == '!') {
enable = !enable;
tok++;
}
if (!strncmp(tok, "no", 2)) {
enable = !enable;
tok += 2;
}
if (*tok == '\0')
continue;
for (i = 0; i < ARRAY_SIZE(names); i++) {
if (!strcmp(tok, names[i])) {
if (enable)
new |= BIT(i);
else
new &= ~BIT(i);
break;
}
}
if (i == ARRAY_SIZE(names)) {
pr_err("Bad \"%s.mitigations=%s\", '%s' is unknown\n",
DRIVER_NAME, val, tok);
err = -EINVAL;
break;
}
}
kfree(str);
if (err)
return err;
WRITE_ONCE(mitigations, new);
return 0;
}
static int mitigations_get(char *buffer, const struct kernel_param *kp)
{
unsigned long local = READ_ONCE(mitigations);
int count, i;
bool enable;
if (!local)
return scnprintf(buffer, PAGE_SIZE, "%s\n", "off");
if (local & BIT(BITS_PER_LONG - 1)) {
count = scnprintf(buffer, PAGE_SIZE, "%s,", "auto");
enable = false;
} else {
enable = true;
count = 0;
}
for (i = 0; i < ARRAY_SIZE(names); i++) {
if ((local & BIT(i)) != enable)
continue;
count += scnprintf(buffer + count, PAGE_SIZE - count,
"%s%s,", enable ? "" : "!", names[i]);
}
buffer[count - 1] = '\n';
return count;
}
static const struct kernel_param_ops ops = {
.set = mitigations_set,
.get = mitigations_get,
};
module_param_cb_unsafe(mitigations, &ops, NULL, 0600);
MODULE_PARM_DESC(mitigations,
"Selectively enable security mitigations for all Intel® GPUs in the system.\n"
"\n"
" auto -- enables all mitigations required for the platform [default]\n"
" off -- disables all mitigations\n"
"\n"
"Individual mitigations can be enabled by passing a comma-separated string,\n"
"e.g. mitigations=residuals to enable only clearing residuals or\n"
"mitigations=auto,noresiduals to disable only the clear residual mitigation.\n"
"Either '!' or 'no' may be used to switch from enabling the mitigation to\n"
"disabling it.\n"
"\n"
"Active mitigations for Ivybridge, Baytrail, Haswell:\n"
" residuals -- clear all thread-local registers between contexts"
);
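For reference, a few hypothetical ways this knob might be exercised (assumptions, not taken from the patch itself): at boot, i915.mitigations=off or i915.mitigations=auto,!residuals on the kernel command line; at runtime, by writing the same strings to the mitigations file under /sys/module/i915/parameters/ (created root-writable by the 0600 permission above). Because the parameter is registered with module_param_cb_unsafe(), overriding the default is treated as an unsupported configuration and is expected to taint the kernel.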
/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2021 Intel Corporation
*/
#ifndef __I915_MITIGATIONS_H__
#define __I915_MITIGATIONS_H__
#include <linux/types.h>
bool i915_mitigate_clear_residuals(void);
#endif /* __I915_MITIGATIONS_H__ */