Commit 64eea680 authored by Dave Airlie

Merge tag 'drm-intel-fixes-2022-05-20' of...

Merge tag 'drm-intel-fixes-2022-05-20' of git://anongit.freedesktop.org/drm/drm-intel into drm-fixes

- fix for #5806: GPU hangs and display artifacts on 5.18-rc3 on Intel GM45
- reject DMC with out-of-spec MMIO (Cc: stable)
- correctly mark guilty contexts on GuC reset.
Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/YocqqvG6PbYx3QgJ@jlahtine-mobl.ger.corp.intel.com
parents 6e4a61cd 7b1d6924
...@@ -367,6 +367,44 @@ static void dmc_set_fw_offset(struct intel_dmc *dmc, ...@@ -367,6 +367,44 @@ static void dmc_set_fw_offset(struct intel_dmc *dmc,
} }
} }
/*
 * dmc_mmio_addr_sanity_check - reject DMC firmware with out-of-spec MMIO
 * addresses before they are ever programmed to hardware.
 *
 * @dmc: DMC state embedded in the i915 device (used to recover i915)
 * @mmioaddr: array of MMIO addresses taken from the firmware payload
 * @mmio_count: number of entries in @mmioaddr
 * @header_ver: DMC firmware package header version (1 = legacy layout)
 * @dmc_id: which DMC program the addresses belong to (main vs. per-pipe)
 *
 * Returns true only when every address falls inside the single allowed
 * window for this platform/program combination; false (after a warning
 * where appropriate) otherwise, so the caller can refuse to load the
 * firmware.
 */
static bool dmc_mmio_addr_sanity_check(struct intel_dmc *dmc,
				       const u32 *mmioaddr, u32 mmio_count,
				       int header_ver, u8 dmc_id)
{
	/* dmc is embedded in drm_i915_private, so recover the device. */
	struct drm_i915_private *i915 = container_of(dmc, typeof(*i915), dmc);
	u32 start_range, end_range;
	u32 i;	/* u32 to match mmio_count; avoids signed/unsigned compare */

	if (dmc_id >= DMC_FW_MAX) {
		drm_warn(&i915->drm, "Unsupported firmware id %u\n", dmc_id);
		return false;
	}

	/*
	 * Select the allowed window. Version-1 headers use the legacy
	 * single range; newer headers have distinct windows for the main
	 * program and the per-pipe programs (ADL-P+ vs. TGL-class).
	 */
	if (header_ver == 1) {
		start_range = DMC_MMIO_START_RANGE;
		end_range = DMC_MMIO_END_RANGE;
	} else if (dmc_id == DMC_FW_MAIN) {
		start_range = TGL_MAIN_MMIO_START;
		end_range = TGL_MAIN_MMIO_END;
	} else if (DISPLAY_VER(i915) >= 13) {
		start_range = ADLP_PIPE_MMIO_START;
		end_range = ADLP_PIPE_MMIO_END;
	} else if (DISPLAY_VER(i915) >= 12) {
		start_range = TGL_PIPE_MMIO_START(dmc_id);
		end_range = TGL_PIPE_MMIO_END(dmc_id);
	} else {
		/* Newline added: kernel log messages must be terminated. */
		drm_warn(&i915->drm, "Unknown mmio range for sanity check\n");
		return false;
	}

	/* Range is inclusive on both ends. */
	for (i = 0; i < mmio_count; i++) {
		if (mmioaddr[i] < start_range || mmioaddr[i] > end_range)
			return false;
	}

	return true;
}
static u32 parse_dmc_fw_header(struct intel_dmc *dmc, static u32 parse_dmc_fw_header(struct intel_dmc *dmc,
const struct intel_dmc_header_base *dmc_header, const struct intel_dmc_header_base *dmc_header,
size_t rem_size, u8 dmc_id) size_t rem_size, u8 dmc_id)
...@@ -436,6 +474,12 @@ static u32 parse_dmc_fw_header(struct intel_dmc *dmc, ...@@ -436,6 +474,12 @@ static u32 parse_dmc_fw_header(struct intel_dmc *dmc,
return 0; return 0;
} }
if (!dmc_mmio_addr_sanity_check(dmc, mmioaddr, mmio_count,
dmc_header->header_ver, dmc_id)) {
drm_err(&i915->drm, "DMC firmware has Wrong MMIO Addresses\n");
return 0;
}
for (i = 0; i < mmio_count; i++) { for (i = 0; i < mmio_count; i++) {
dmc_info->mmioaddr[i] = _MMIO(mmioaddr[i]); dmc_info->mmioaddr[i] = _MMIO(mmioaddr[i]);
dmc_info->mmiodata[i] = mmiodata[i]; dmc_info->mmiodata[i] = mmiodata[i];
......
...@@ -1252,14 +1252,12 @@ static void *reloc_iomap(struct i915_vma *batch, ...@@ -1252,14 +1252,12 @@ static void *reloc_iomap(struct i915_vma *batch,
* Only attempt to pin the batch buffer to ggtt if the current batch * Only attempt to pin the batch buffer to ggtt if the current batch
* is not inside ggtt, or the batch buffer is not misplaced. * is not inside ggtt, or the batch buffer is not misplaced.
*/ */
if (!i915_is_ggtt(batch->vm)) { if (!i915_is_ggtt(batch->vm) ||
!i915_vma_misplaced(batch, 0, 0, PIN_MAPPABLE)) {
vma = i915_gem_object_ggtt_pin_ww(obj, &eb->ww, NULL, 0, 0, vma = i915_gem_object_ggtt_pin_ww(obj, &eb->ww, NULL, 0, 0,
PIN_MAPPABLE | PIN_MAPPABLE |
PIN_NONBLOCK /* NOWARN */ | PIN_NONBLOCK /* NOWARN */ |
PIN_NOEVICT); PIN_NOEVICT);
} else if (i915_vma_is_map_and_fenceable(batch)) {
__i915_vma_pin(batch);
vma = batch;
} }
if (vma == ERR_PTR(-EDEADLK)) if (vma == ERR_PTR(-EDEADLK))
......
...@@ -806,7 +806,7 @@ static int gt_reset(struct intel_gt *gt, intel_engine_mask_t stalled_mask) ...@@ -806,7 +806,7 @@ static int gt_reset(struct intel_gt *gt, intel_engine_mask_t stalled_mask)
__intel_engine_reset(engine, stalled_mask & engine->mask); __intel_engine_reset(engine, stalled_mask & engine->mask);
local_bh_enable(); local_bh_enable();
intel_uc_reset(&gt->uc, true); intel_uc_reset(&gt->uc, ALL_ENGINES);
intel_ggtt_restore_fences(gt->ggtt); intel_ggtt_restore_fences(gt->ggtt);
......
...@@ -438,7 +438,7 @@ int intel_guc_global_policies_update(struct intel_guc *guc); ...@@ -438,7 +438,7 @@ int intel_guc_global_policies_update(struct intel_guc *guc);
void intel_guc_context_ban(struct intel_context *ce, struct i915_request *rq); void intel_guc_context_ban(struct intel_context *ce, struct i915_request *rq);
void intel_guc_submission_reset_prepare(struct intel_guc *guc); void intel_guc_submission_reset_prepare(struct intel_guc *guc);
void intel_guc_submission_reset(struct intel_guc *guc, bool stalled); void intel_guc_submission_reset(struct intel_guc *guc, intel_engine_mask_t stalled);
void intel_guc_submission_reset_finish(struct intel_guc *guc); void intel_guc_submission_reset_finish(struct intel_guc *guc);
void intel_guc_submission_cancel_requests(struct intel_guc *guc); void intel_guc_submission_cancel_requests(struct intel_guc *guc);
......
...@@ -1590,9 +1590,9 @@ __unwind_incomplete_requests(struct intel_context *ce) ...@@ -1590,9 +1590,9 @@ __unwind_incomplete_requests(struct intel_context *ce)
spin_unlock_irqrestore(&sched_engine->lock, flags); spin_unlock_irqrestore(&sched_engine->lock, flags);
} }
static void __guc_reset_context(struct intel_context *ce, bool stalled) static void __guc_reset_context(struct intel_context *ce, intel_engine_mask_t stalled)
{ {
bool local_stalled; bool guilty;
struct i915_request *rq; struct i915_request *rq;
unsigned long flags; unsigned long flags;
u32 head; u32 head;
...@@ -1620,7 +1620,7 @@ static void __guc_reset_context(struct intel_context *ce, bool stalled) ...@@ -1620,7 +1620,7 @@ static void __guc_reset_context(struct intel_context *ce, bool stalled)
if (!intel_context_is_pinned(ce)) if (!intel_context_is_pinned(ce))
goto next_context; goto next_context;
local_stalled = false; guilty = false;
rq = intel_context_find_active_request(ce); rq = intel_context_find_active_request(ce);
if (!rq) { if (!rq) {
head = ce->ring->tail; head = ce->ring->tail;
...@@ -1628,14 +1628,14 @@ static void __guc_reset_context(struct intel_context *ce, bool stalled) ...@@ -1628,14 +1628,14 @@ static void __guc_reset_context(struct intel_context *ce, bool stalled)
} }
if (i915_request_started(rq)) if (i915_request_started(rq))
local_stalled = true; guilty = stalled & ce->engine->mask;
GEM_BUG_ON(i915_active_is_idle(&ce->active)); GEM_BUG_ON(i915_active_is_idle(&ce->active));
head = intel_ring_wrap(ce->ring, rq->head); head = intel_ring_wrap(ce->ring, rq->head);
__i915_request_reset(rq, local_stalled && stalled); __i915_request_reset(rq, guilty);
out_replay: out_replay:
guc_reset_state(ce, head, local_stalled && stalled); guc_reset_state(ce, head, guilty);
next_context: next_context:
if (i != number_children) if (i != number_children)
ce = list_next_entry(ce, parallel.child_link); ce = list_next_entry(ce, parallel.child_link);
...@@ -1645,7 +1645,7 @@ static void __guc_reset_context(struct intel_context *ce, bool stalled) ...@@ -1645,7 +1645,7 @@ static void __guc_reset_context(struct intel_context *ce, bool stalled)
intel_context_put(parent); intel_context_put(parent);
} }
void intel_guc_submission_reset(struct intel_guc *guc, bool stalled) void intel_guc_submission_reset(struct intel_guc *guc, intel_engine_mask_t stalled)
{ {
struct intel_context *ce; struct intel_context *ce;
unsigned long index; unsigned long index;
...@@ -4013,7 +4013,7 @@ static void guc_context_replay(struct intel_context *ce) ...@@ -4013,7 +4013,7 @@ static void guc_context_replay(struct intel_context *ce)
{ {
struct i915_sched_engine *sched_engine = ce->engine->sched_engine; struct i915_sched_engine *sched_engine = ce->engine->sched_engine;
__guc_reset_context(ce, true); __guc_reset_context(ce, ce->engine->mask);
tasklet_hi_schedule(&sched_engine->tasklet); tasklet_hi_schedule(&sched_engine->tasklet);
} }
......
...@@ -593,7 +593,7 @@ void intel_uc_reset_prepare(struct intel_uc *uc) ...@@ -593,7 +593,7 @@ void intel_uc_reset_prepare(struct intel_uc *uc)
__uc_sanitize(uc); __uc_sanitize(uc);
} }
void intel_uc_reset(struct intel_uc *uc, bool stalled) void intel_uc_reset(struct intel_uc *uc, intel_engine_mask_t stalled)
{ {
struct intel_guc *guc = &uc->guc; struct intel_guc *guc = &uc->guc;
......
...@@ -42,7 +42,7 @@ void intel_uc_driver_late_release(struct intel_uc *uc); ...@@ -42,7 +42,7 @@ void intel_uc_driver_late_release(struct intel_uc *uc);
void intel_uc_driver_remove(struct intel_uc *uc); void intel_uc_driver_remove(struct intel_uc *uc);
void intel_uc_init_mmio(struct intel_uc *uc); void intel_uc_init_mmio(struct intel_uc *uc);
void intel_uc_reset_prepare(struct intel_uc *uc); void intel_uc_reset_prepare(struct intel_uc *uc);
void intel_uc_reset(struct intel_uc *uc, bool stalled); void intel_uc_reset(struct intel_uc *uc, intel_engine_mask_t stalled);
void intel_uc_reset_finish(struct intel_uc *uc); void intel_uc_reset_finish(struct intel_uc *uc);
void intel_uc_cancel_requests(struct intel_uc *uc); void intel_uc_cancel_requests(struct intel_uc *uc);
void intel_uc_suspend(struct intel_uc *uc); void intel_uc_suspend(struct intel_uc *uc);
......
...@@ -5501,6 +5501,22 @@ ...@@ -5501,6 +5501,22 @@
/* MMIO address range for DMC program (0x80000 - 0x82FFF) */ /* MMIO address range for DMC program (0x80000 - 0x82FFF) */
#define DMC_MMIO_START_RANGE 0x80000 #define DMC_MMIO_START_RANGE 0x80000
#define DMC_MMIO_END_RANGE 0x8FFFF #define DMC_MMIO_END_RANGE 0x8FFFF
/* Legacy (header version 1) DMC MMIO window start; currently unreferenced
 * in the visible code — presumably kept for symmetry with
 * DMC_MMIO_START_RANGE above. TODO confirm against callers. */
#define DMC_V1_MMIO_START_RANGE 0x80000
/* Allowed MMIO window for the main DMC program on TGL-and-newer. */
#define TGL_MAIN_MMIO_START 0x8F000
#define TGL_MAIN_MMIO_END 0x8FFFF
/* Per-pipe DMC MMIO windows on TGL-class hardware (display ver 12). */
#define _TGL_PIPEA_MMIO_START 0x92000
#define _TGL_PIPEA_MMIO_END 0x93FFF
#define _TGL_PIPEB_MMIO_START 0x96000
#define _TGL_PIPEB_MMIO_END 0x97FFF
/* Per-pipe DMC MMIO window on ADL-P-and-newer (display ver 13+);
 * shared across pipes, unlike the TGL per-pipe windows. */
#define ADLP_PIPE_MMIO_START 0x5F000
#define ADLP_PIPE_MMIO_END 0x5FFFF
/* Pick the pipe window by dmc_id; dmc_id 1 -> pipe A, 2 -> pipe B
 * (dmc_id 0 is the main program, hence the "- 1"). */
#define TGL_PIPE_MMIO_START(dmc_id) _PICK_EVEN(((dmc_id) - 1), _TGL_PIPEA_MMIO_START,\
 _TGL_PIPEB_MMIO_START)
#define TGL_PIPE_MMIO_END(dmc_id) _PICK_EVEN(((dmc_id) - 1), _TGL_PIPEA_MMIO_END,\
 _TGL_PIPEB_MMIO_END)
#define SKL_DMC_DC3_DC5_COUNT _MMIO(0x80030) #define SKL_DMC_DC3_DC5_COUNT _MMIO(0x80030)
#define SKL_DMC_DC5_DC6_COUNT _MMIO(0x8002C) #define SKL_DMC_DC5_DC6_COUNT _MMIO(0x8002C)
#define BXT_DMC_DC3_DC5_COUNT _MMIO(0x80038) #define BXT_DMC_DC3_DC5_COUNT _MMIO(0x80038)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment