Commit d6a841a4 authored by Dave Airlie

Merge tag 'drm-intel-next-fixes-2018-02-01' of git://anongit.freedesktop.org/drm/drm-intel into drm-next

Fixes for GPU hangs and other bugs around hangcheck and reset;
Fix for regression on suspend case with vgaswitcheroo;
Fixes for eDP and HDMI blank screens;
Fix for protecting WC allocation to avoid overflow on page vec;
Cleanup around unpublished GLK firmware blobs, and other small fixes.

This also contains the GVT pull request, mostly regression fixes
for vGPU display dmabuf, mmio switching, and other misc changes.

* tag 'drm-intel-next-fixes-2018-02-01' of git://anongit.freedesktop.org/drm/drm-intel: (21 commits)
  drm/i915/ppgtt: Pin page directories before allocation
  drm/i915: Always run hangcheck while the GPU is busy
  Revert "drm/i915: mark all device info struct with __initconst"
  drm/i915/edp: Do not do link training fallback or prune modes on EDP
  drm/i915: Check for fused or unused pipes
  drm/i915: Protect WC stash allocation against direct reclaim
  drm/i915: Only attempt to scan the requested number of shrinker slabs
  drm/i915: Always call to intel_display_set_init_power() in resume_early.
  drm/i915/gvt: cancel scheduler timer when no vGPU exists
  drm/i915/gvt: cancel virtual vblank timer when no vGPU exists
  drm/i915/gvt: Keep obj->dma_buf link NULL during exporting
  drm/i915/pmu: Reconstruct active state on starting busy-stats
  drm/i915: Stop getting the fault address from RING_FAULT_REG
  drm/i915/guc: Add uc_fini_wq in gem_init unwind path
  drm/i915: Fix using BIT_ULL() vs. BIT() for power domain masks
  drm/i915: Try EDID bitbanging on HDMI after failed read
  drm/i915/glk: Disable Guc and HuC on GLK
  drm/i915/gvt: Do not use I915_NUM_ENGINES to iterate over the mocs regs array
  drm/i915/gvt: validate gfn before set shadow page entry
  drm/i915/gvt: add PLANE_KEYMAX regs to mmio track list
  ...
parents 24b8ef69 751b01cb
@@ -472,7 +472,6 @@ int intel_vgpu_get_dmabuf(struct intel_vgpu *vgpu, unsigned int dmabuf_id)
 		ret = PTR_ERR(dmabuf);
 		goto out_free_gem;
 	}
-	obj->base.dma_buf = dmabuf;
 
 	i915_gem_object_put(obj);
......
@@ -997,9 +997,11 @@ static inline void ppgtt_generate_shadow_entry(struct intel_gvt_gtt_entry *se,
 static int ppgtt_populate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
 {
 	struct intel_vgpu *vgpu = spt->vgpu;
+	struct intel_gvt *gvt = vgpu->gvt;
+	struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
 	struct intel_vgpu_ppgtt_spt *s;
 	struct intel_gvt_gtt_entry se, ge;
-	unsigned long i;
+	unsigned long gfn, i;
 	int ret;
 
 	trace_spt_change(spt->vgpu->id, "born", spt,
@@ -1007,9 +1009,10 @@ static int ppgtt_populate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
 	if (gtt_type_is_pte_pt(spt->shadow_page.type)) {
 		for_each_present_guest_entry(spt, &ge, i) {
-			ret = gtt_entry_p2m(vgpu, &ge, &se);
-			if (ret)
-				goto fail;
+			gfn = ops->get_pfn(&ge);
+			if (!intel_gvt_hypervisor_is_valid_gfn(vgpu, gfn) ||
+			    gtt_entry_p2m(vgpu, &ge, &se))
+				ops->set_pfn(&se, gvt->gtt.scratch_mfn);
+
 			ppgtt_set_shadow_entry(spt, &se, i);
 		}
 		return 0;
@@ -1906,7 +1909,7 @@ static int emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
 	struct intel_vgpu_mm *ggtt_mm = vgpu->gtt.ggtt_mm;
 	struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
 	unsigned long g_gtt_index = off >> info->gtt_entry_size_shift;
-	unsigned long gma;
+	unsigned long gma, gfn;
 	struct intel_gvt_gtt_entry e, m;
 	int ret;
@@ -1925,6 +1928,16 @@ static int emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
 			bytes);
 
 	if (ops->test_present(&e)) {
+		gfn = ops->get_pfn(&e);
+
+		/* one PTE update may be issued in multiple writes and the
+		 * first write may not construct a valid gfn
+		 */
+		if (!intel_gvt_hypervisor_is_valid_gfn(vgpu, gfn)) {
+			ops->set_pfn(&m, gvt->gtt.scratch_mfn);
+			goto out;
+		}
+
 		ret = gtt_entry_p2m(vgpu, &e, &m);
 		if (ret) {
 			gvt_vgpu_err("fail to translate guest gtt entry\n");
@@ -1939,6 +1952,7 @@ static int emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
 		ops->set_pfn(&m, gvt->gtt.scratch_mfn);
 	}
 
+out:
 	ggtt_set_shadow_entry(ggtt_mm, &m, g_gtt_index);
 	gtt_invalidate(gvt->dev_priv);
 	ggtt_set_guest_entry(ggtt_mm, &e, g_gtt_index);
......
@@ -2843,6 +2843,9 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
 	MMIO_D(_MMIO(_PLANE_KEYVAL_1(PIPE_A)), D_SKL_PLUS);
 	MMIO_D(_MMIO(_PLANE_KEYVAL_1(PIPE_B)), D_SKL_PLUS);
 	MMIO_D(_MMIO(_PLANE_KEYVAL_1(PIPE_C)), D_SKL_PLUS);
+	MMIO_D(_MMIO(_PLANE_KEYMAX_1(PIPE_A)), D_SKL_PLUS);
+	MMIO_D(_MMIO(_PLANE_KEYMAX_1(PIPE_B)), D_SKL_PLUS);
+	MMIO_D(_MMIO(_PLANE_KEYMAX_1(PIPE_C)), D_SKL_PLUS);
 	MMIO_D(_MMIO(_PLANE_KEYMSK_1(PIPE_A)), D_SKL_PLUS);
 	MMIO_D(_MMIO(_PLANE_KEYMSK_1(PIPE_B)), D_SKL_PLUS);
 	MMIO_D(_MMIO(_PLANE_KEYMSK_1(PIPE_C)), D_SKL_PLUS);
......
@@ -58,6 +58,7 @@ struct intel_gvt_mpt {
 	int (*set_opregion)(void *vgpu);
 	int (*get_vfio_device)(void *vgpu);
 	void (*put_vfio_device)(void *vgpu);
+	bool (*is_valid_gfn)(unsigned long handle, unsigned long gfn);
 };
 
 extern struct intel_gvt_mpt xengt_mpt;
......
@@ -1570,6 +1570,21 @@ static unsigned long kvmgt_virt_to_pfn(void *addr)
 	return PFN_DOWN(__pa(addr));
 }
 
+static bool kvmgt_is_valid_gfn(unsigned long handle, unsigned long gfn)
+{
+	struct kvmgt_guest_info *info;
+	struct kvm *kvm;
+
+	if (!handle_valid(handle))
+		return false;
+
+	info = (struct kvmgt_guest_info *)handle;
+	kvm = info->kvm;
+
+	return kvm_is_visible_gfn(kvm, gfn);
+}
+
 struct intel_gvt_mpt kvmgt_mpt = {
 	.host_init = kvmgt_host_init,
 	.host_exit = kvmgt_host_exit,
@@ -1585,6 +1600,7 @@ struct intel_gvt_mpt kvmgt_mpt = {
 	.set_opregion = kvmgt_set_opregion,
 	.get_vfio_device = kvmgt_get_vfio_device,
 	.put_vfio_device = kvmgt_put_vfio_device,
+	.is_valid_gfn = kvmgt_is_valid_gfn,
 };
 EXPORT_SYMBOL_GPL(kvmgt_mpt);
......
@@ -80,7 +80,7 @@ static struct engine_mmio gen8_engine_mmio_list[] __cacheline_aligned = {
 	{BCS, RING_INSTPM(BLT_RING_BASE), 0xffff, false}, /* 0x220c0 */
 	{BCS, RING_HWSTAM(BLT_RING_BASE), 0x0, false}, /* 0x22098 */
 	{BCS, RING_EXCC(BLT_RING_BASE), 0x0, false}, /* 0x22028 */
-	{ /* Terminated */ }
+	{RCS, INVALID_MMIO_REG, 0, false } /* Terminated */
 };
 
 static struct engine_mmio gen9_engine_mmio_list[] __cacheline_aligned = {
@@ -146,7 +146,7 @@ static struct engine_mmio gen9_engine_mmio_list[] __cacheline_aligned = {
 	{RCS, GEN8_GARBCNTL, 0x0, false}, /* 0xb004 */
 	{RCS, GEN7_FF_THREAD_MODE, 0x0, false}, /* 0x20a0 */
 	{RCS, FF_SLICE_CS_CHICKEN2, 0xffff, false}, /* 0x20e4 */
-	{ /* Terminated */ }
+	{RCS, INVALID_MMIO_REG, 0, false } /* Terminated */
 };
 
 static struct {
@@ -167,7 +167,7 @@ static void load_render_mocs(struct drm_i915_private *dev_priv)
 	};
 	int ring_id, i;
 
-	for (ring_id = 0; ring_id < I915_NUM_ENGINES; ring_id++) {
+	for (ring_id = 0; ring_id < ARRAY_SIZE(regs); ring_id++) {
 		offset.reg = regs[ring_id];
 		for (i = 0; i < 64; i++) {
 			gen9_render_mocs.control_table[ring_id][i] =
@@ -310,8 +310,8 @@ static void switch_mmio(struct intel_vgpu *pre,
 	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
 		switch_mocs(pre, next, ring_id);
 
-	mmio = dev_priv->gvt->engine_mmio_list;
-	while (i915_mmio_reg_offset((mmio++)->reg)) {
+	for (mmio = dev_priv->gvt->engine_mmio_list;
+	     i915_mmio_reg_valid(mmio->reg); mmio++) {
 		if (mmio->ring_id != ring_id)
 			continue;
 		// save
......
@@ -339,4 +339,21 @@ static inline void intel_gvt_hypervisor_put_vfio_device(struct intel_vgpu *vgpu)
 	intel_gvt_host.mpt->put_vfio_device(vgpu);
 }
 
+/**
+ * intel_gvt_hypervisor_is_valid_gfn - check if a gfn is visible to the guest
+ * @vgpu: a vGPU
+ * @gfn: guest PFN
+ *
+ * Returns:
+ * true if the gfn is valid, false if not.
+ */
+static inline bool intel_gvt_hypervisor_is_valid_gfn(
+		struct intel_vgpu *vgpu, unsigned long gfn)
+{
+	if (!intel_gvt_host.mpt->is_valid_gfn)
+		return true;
+
+	return intel_gvt_host.mpt->is_valid_gfn(vgpu->handle, gfn);
+}
+
 #endif /* _GVT_MPT_H_ */
@@ -308,8 +308,15 @@ static int tbs_sched_init_vgpu(struct intel_vgpu *vgpu)
 static void tbs_sched_clean_vgpu(struct intel_vgpu *vgpu)
 {
+	struct intel_gvt *gvt = vgpu->gvt;
+	struct gvt_sched_data *sched_data = gvt->scheduler.sched_data;
+
 	kfree(vgpu->sched_data);
 	vgpu->sched_data = NULL;
+
+	/* this vgpu id has been removed */
+	if (idr_is_empty(&gvt->vgpu_idr))
+		hrtimer_cancel(&sched_data->timer);
 }
 
 static void tbs_sched_start_schedule(struct intel_vgpu *vgpu)
......
@@ -258,6 +258,8 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
 	intel_gvt_debugfs_remove_vgpu(vgpu);
 	idr_remove(&gvt->vgpu_idr, vgpu->id);
+	if (idr_is_empty(&gvt->vgpu_idr))
+		intel_gvt_clean_irq(gvt);
 	intel_vgpu_clean_sched_policy(vgpu);
 	intel_vgpu_clean_submission(vgpu);
 	intel_vgpu_clean_display(vgpu);
......
@@ -1842,6 +1842,8 @@ static int i915_drm_resume_early(struct drm_device *dev)
 	if (IS_GEN9_LP(dev_priv) ||
 	    !(dev_priv->suspended_to_idle && dev_priv->csr.dmc_payload))
 		intel_power_domains_init_hw(dev_priv, true);
+	else
+		intel_display_set_init_power(dev_priv, true);
 
 	i915_gem_sanitize(dev_priv);
......
@@ -3323,16 +3323,15 @@ i915_gem_retire_work_handler(struct work_struct *work)
 		mutex_unlock(&dev->struct_mutex);
 	}
 
-	/* Keep the retire handler running until we are finally idle.
+	/*
+	 * Keep the retire handler running until we are finally idle.
 	 * We do not need to do this test under locking as in the worst-case
 	 * we queue the retire worker once too often.
 	 */
-	if (READ_ONCE(dev_priv->gt.awake)) {
-		i915_queue_hangcheck(dev_priv);
+	if (READ_ONCE(dev_priv->gt.awake))
 		queue_delayed_work(dev_priv->wq,
 				   &dev_priv->gt.retire_work,
 				   round_jiffies_up_relative(HZ));
-	}
 }
 
 static inline bool
@@ -5283,6 +5282,8 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
 	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 	mutex_unlock(&dev_priv->drm.struct_mutex);
 
+	intel_uc_fini_wq(dev_priv);
+
 	if (ret != -EIO)
 		i915_gem_cleanup_userptr(dev_priv);
......
@@ -377,6 +377,7 @@ static gen6_pte_t iris_pte_encode(dma_addr_t addr,
 static struct page *vm_alloc_page(struct i915_address_space *vm, gfp_t gfp)
 {
 	struct pagevec *pvec = &vm->free_pages;
+	struct pagevec stash;
 
 	if (I915_SELFTEST_ONLY(should_fail(&vm->fault_attr, 1)))
 		i915_gem_shrink_all(vm->i915);
@@ -395,7 +396,15 @@ static struct page *vm_alloc_page(struct i915_address_space *vm, gfp_t gfp)
 	if (likely(pvec->nr))
 		return pvec->pages[--pvec->nr];
 
-	/* Otherwise batch allocate pages to amoritize cost of set_pages_wc. */
+	/*
+	 * Otherwise batch allocate pages to amortize cost of set_pages_wc.
+	 *
+	 * We have to be careful as page allocation may trigger the shrinker
+	 * (via direct reclaim) which will fill up the WC stash underneath us.
+	 * So we add our WB pages into a temporary pvec on the stack and merge
+	 * them into the WC stash after all the allocations are complete.
+	 */
+	pagevec_init(&stash);
 	do {
 		struct page *page;
@@ -403,15 +412,24 @@ static struct page *vm_alloc_page(struct i915_address_space *vm, gfp_t gfp)
 		if (unlikely(!page))
 			break;
 
-		pvec->pages[pvec->nr++] = page;
-	} while (pagevec_space(pvec));
+		stash.pages[stash.nr++] = page;
+	} while (stash.nr < pagevec_space(pvec));
 
-	if (unlikely(!pvec->nr))
-		return NULL;
+	if (stash.nr) {
+		int nr = min_t(int, stash.nr, pagevec_space(pvec));
+		struct page **pages = stash.pages + stash.nr - nr;
 
-	set_pages_array_wc(pvec->pages, pvec->nr);
+		if (nr && !set_pages_array_wc(pages, nr)) {
+			memcpy(pvec->pages + pvec->nr,
+			       pages, sizeof(pages[0]) * nr);
+			pvec->nr += nr;
+			stash.nr -= nr;
+		}
 
-	return pvec->pages[--pvec->nr];
+		pagevec_release(&stash);
+	}
+
+	return likely(pvec->nr) ? pvec->pages[--pvec->nr] : NULL;
 }
 
 static void vm_free_pages_release(struct i915_address_space *vm,
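The stash-then-merge flow above is the crux of the WC-stash fix, so here is a minimal standalone sketch of the pattern under simplified assumptions (fixed-size cache; alloc_one()/free_one() are hypothetical helpers, and none of this is the driver's code). The point is that allocation may re-enter reclaim and refill the shared cache, so new entries are staged privately and only merged once allocation is done:

    /* Illustrative sketch, not i915 code: refill a fixed-size cache when
     * the allocator may recursively push entries into that same cache. */
    #define CAP 15

    struct cache { int nr; void *slots[CAP]; };

    static void *cache_pop_or_refill(struct cache *c)
    {
            void *stash[CAP];
            int nr = 0;

            if (c->nr)                              /* fast path */
                    return c->slots[--c->nr];

            /* Stage into a private stash: alloc_one() may trigger reclaim,
             * which refills 'c' behind our back, so 'c' must not be written
             * while we allocate. */
            while (nr + c->nr < CAP) {
                    void *p = alloc_one();          /* hypothetical allocator */
                    if (!p)
                            break;
                    stash[nr++] = p;
            }

            /* Merge what still fits, release any overflow. */
            while (nr && c->nr < CAP)
                    c->slots[c->nr++] = stash[--nr];
            while (nr)
                    free_one(stash[--nr]);          /* hypothetical release */

            return c->nr ? c->slots[--c->nr] : NULL;
    }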
@@ -1341,15 +1359,18 @@ static int gen8_ppgtt_alloc_pd(struct i915_address_space *vm,
 		int count = gen8_pte_count(start, length);
 
 		if (pt == vm->scratch_pt) {
+			pd->used_pdes++;
+
 			pt = alloc_pt(vm);
-			if (IS_ERR(pt))
+			if (IS_ERR(pt)) {
+				pd->used_pdes--;
 				goto unwind;
+			}
 
 			if (count < GEN8_PTES || intel_vgpu_active(vm->i915))
 				gen8_initialize_pt(vm, pt);
 
 			gen8_ppgtt_set_pde(vm, pd, pt, pde);
-			pd->used_pdes++;
 			GEM_BUG_ON(pd->used_pdes > I915_PDES);
 		}
@@ -1373,13 +1394,16 @@ static int gen8_ppgtt_alloc_pdp(struct i915_address_space *vm,
 	gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
 		if (pd == vm->scratch_pd) {
+			pdp->used_pdpes++;
+
 			pd = alloc_pd(vm);
-			if (IS_ERR(pd))
+			if (IS_ERR(pd)) {
+				pdp->used_pdpes--;
 				goto unwind;
+			}
 
 			gen8_initialize_pd(vm, pd);
 			gen8_ppgtt_set_pdpe(vm, pdp, pd, pdpe);
-			pdp->used_pdpes++;
 			GEM_BUG_ON(pdp->used_pdpes > i915_pdpes_per_pdp(vm));
 
 			mark_tlbs_dirty(i915_vm_to_ppgtt(vm));
@@ -2287,12 +2311,23 @@ static void gen8_check_and_clear_faults(struct drm_i915_private *dev_priv)
 	u32 fault = I915_READ(GEN8_RING_FAULT_REG);
 
 	if (fault & RING_FAULT_VALID) {
+		u32 fault_data0, fault_data1;
+		u64 fault_addr;
+
+		fault_data0 = I915_READ(GEN8_FAULT_TLB_DATA0);
+		fault_data1 = I915_READ(GEN8_FAULT_TLB_DATA1);
+		fault_addr = ((u64)(fault_data1 & FAULT_VA_HIGH_BITS) << 44) |
+			     ((u64)fault_data0 << 12);
+
 		DRM_DEBUG_DRIVER("Unexpected fault\n"
-				 "\tAddr: 0x%08lx\n"
+				 "\tAddr: 0x%08x_%08x\n"
+				 "\tAddress space: %s\n"
 				 "\tEngine ID: %d\n"
 				 "\tSource ID: %d\n"
 				 "\tType: %d\n",
-				 fault & PAGE_MASK,
+				 upper_32_bits(fault_addr),
+				 lower_32_bits(fault_addr),
+				 fault_data1 & FAULT_GTT_SEL ? "GGTT" : "PPGTT",
 				 GEN8_RING_FAULT_ENGINE_ID(fault),
 				 RING_FAULT_SRCID(fault),
 				 RING_FAULT_FAULT_TYPE(fault));
......
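As a cross-check of the address reassembly above: GEN8_FAULT_TLB_DATA0 supplies VA bits 43:12 (the page-aligned address shifted down by 12), the low nibble of GEN8_FAULT_TLB_DATA1 (FAULT_VA_HIGH_BITS) supplies VA bits 47:44, and bit 4 (FAULT_GTT_SEL) picks GGTT vs. PPGTT. With illustrative register values (not taken from the patch):

    /* Illustrative check of the 48-bit VA reassembly; not driver code. */
    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t fault_data0 = 0x00abcdef; /* VA bits 43:12 */
            uint32_t fault_data1 = 0x00000013; /* bit 4 = GGTT, bits 3:0 = VA 47:44 */
            uint64_t fault_addr;

            fault_addr = ((uint64_t)(fault_data1 & 0xf) << 44) |
                         ((uint64_t)fault_data0 << 12);

            assert(fault_addr == 0x300abcdef000ULL); /* full 48-bit GPU VA */
            assert(fault_data1 & (1 << 4));          /* fault hit the GGTT */
            return 0;
    }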
@@ -276,6 +276,8 @@ static void mark_busy(struct drm_i915_private *i915)
 	intel_engines_unpark(i915);
 
+	i915_queue_hangcheck(i915);
+
 	queue_delayed_work(i915->wq,
 			   &i915->gt.retire_work,
 			   round_jiffies_up_relative(HZ));
......
@@ -363,13 +363,13 @@ i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
 				I915_SHRINK_BOUND |
 				I915_SHRINK_UNBOUND |
 				I915_SHRINK_PURGEABLE);
-	if (freed < sc->nr_to_scan)
+	if (sc->nr_scanned < sc->nr_to_scan)
 		freed += i915_gem_shrink(i915,
 					 sc->nr_to_scan - sc->nr_scanned,
 					 &sc->nr_scanned,
 					 I915_SHRINK_BOUND |
 					 I915_SHRINK_UNBOUND);
-	if (freed < sc->nr_to_scan && current_is_kswapd()) {
+	if (sc->nr_scanned < sc->nr_to_scan && current_is_kswapd()) {
 		intel_runtime_pm_get(i915);
 		freed += i915_gem_shrink(i915,
 					 sc->nr_to_scan - sc->nr_scanned,
......
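The shrinker change above separates progress from yield: sc->nr_scanned counts objects examined, while the accumulated freed total counts pages actually reclaimed. Gating the follow-up passes on the freed total keeps re-walking objects that were already examined and found unfreeable; gating on nr_scanned stops once the requested batch has been inspected. A simplified sketch of the idea (standalone struct and a hypothetical reclaim_one(); not the kernel's definitions):

    /* Illustrative only: why the retry gate uses nr_scanned, not freed. */
    struct shrink_control { unsigned long nr_to_scan, nr_scanned; };

    static unsigned long example_scan(struct shrink_control *sc)
    {
            unsigned long freed = 0;

            while (sc->nr_scanned < sc->nr_to_scan) {
                    sc->nr_scanned++;          /* progress: one object examined */
                    freed += reclaim_one();    /* hypothetical: 1 if freed, else 0 */
            }

            /* 'freed < sc->nr_to_scan' may never become false when most
             * objects are pinned, so it would rescan the same list forever;
             * 'sc->nr_scanned < sc->nr_to_scan' terminates after one batch. */
            return freed;
    }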
@@ -2489,6 +2489,8 @@ enum i915_power_well_id {
 #define GEN8_FAULT_TLB_DATA0	_MMIO(0x4b10)
 #define GEN8_FAULT_TLB_DATA1	_MMIO(0x4b14)
+#define   FAULT_VA_HIGH_BITS	(0xf << 0)
+#define   FAULT_GTT_SEL		(1 << 4)
 
 #define FPGA_DBG		_MMIO(0x42300)
 #define   FPGA_DBG_RM_NOCLAIM	(1<<31)
......
@@ -779,7 +779,7 @@ static struct intel_encoder *get_saved_enc(struct drm_i915_private *dev_priv,
 {
 	struct intel_encoder *encoder;
 
-	if (WARN_ON(pipe >= INTEL_INFO(dev_priv)->num_pipes))
+	if (WARN_ON(pipe >= ARRAY_SIZE(dev_priv->av_enc_map)))
 		return NULL;
 
 	/* MST */
......
@@ -149,17 +149,6 @@ static void intel_breadcrumbs_fake_irq(struct timer_list *t)
 		return;
 
 	mod_timer(&b->fake_irq, jiffies + 1);
-
-	/* Ensure that even if the GPU hangs, we get woken up.
-	 *
-	 * However, note that if no one is waiting, we never notice
-	 * a gpu hang. Eventually, we will have to wait for a resource
-	 * held by the GPU and so trigger a hangcheck. In the most
-	 * pathological case, this will be upon memory starvation! To
-	 * prevent this, we also queue the hangcheck from the retire
-	 * worker.
-	 */
-	i915_queue_hangcheck(engine->i915);
 }
 
 static void irq_enable(struct intel_engine_cs *engine)
......
@@ -5661,8 +5661,8 @@ static u64 get_crtc_power_domains(struct drm_crtc *crtc,
 	if (!crtc_state->base.active)
 		return 0;
 
-	mask = BIT(POWER_DOMAIN_PIPE(pipe));
-	mask |= BIT(POWER_DOMAIN_TRANSCODER(transcoder));
+	mask = BIT_ULL(POWER_DOMAIN_PIPE(pipe));
+	mask |= BIT_ULL(POWER_DOMAIN_TRANSCODER(transcoder));
 	if (crtc_state->pch_pfit.enabled ||
 	    crtc_state->pch_pfit.force_thru)
 		mask |= BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
@@ -5674,7 +5674,7 @@ static u64 get_crtc_power_domains(struct drm_crtc *crtc,
 	}
 
 	if (HAS_DDI(dev_priv) && crtc_state->has_audio)
-		mask |= BIT(POWER_DOMAIN_AUDIO);
+		mask |= BIT_ULL(POWER_DOMAIN_AUDIO);
 
 	if (crtc_state->shared_dpll)
 		mask |= BIT_ULL(POWER_DOMAIN_PLLS);
......
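For context on the BIT()/BIT_ULL() hunk: the power-domain mask is a u64, but BIT() shifts 1UL, which is only 32 bits wide on 32-bit kernels, so any domain numbered 32 or above is lost (and the shift itself is undefined). A minimal illustration, mirroring the standard macro definitions with a hypothetical domain index:

    /* Illustrative only; mirrors the standard BIT()/BIT_ULL() definitions. */
    #include <assert.h>
    #include <stdint.h>

    #define BIT(nr)      (1UL << (nr))   /* unsigned long: 32-bit on i386 */
    #define BIT_ULL(nr)  (1ULL << (nr))  /* always 64-bit */

    int main(void)
    {
            int domain = 40;               /* hypothetical domain index >= 32 */
            uint64_t mask = BIT_ULL(domain);

            assert(mask == 0x10000000000ULL);  /* bit 40 set as intended */
            /* On a 32-bit build, BIT(domain) would shift 1UL by 40 bits:
             * undefined behaviour, and the bit vanishes from the u64 mask. */
            return 0;
    }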
@@ -328,14 +328,22 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
 	return;
 
  failure_handling:
-	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] Link Training failed at link rate = %d, lane count = %d",
-		      intel_connector->base.base.id,
-		      intel_connector->base.name,
-		      intel_dp->link_rate, intel_dp->lane_count);
-	if (!intel_dp_get_link_train_fallback_values(intel_dp,
-						     intel_dp->link_rate,
-						     intel_dp->lane_count))
-		/* Schedule a Hotplug Uevent to userspace to start modeset */
-		schedule_work(&intel_connector->modeset_retry_work);
+	/* Don't fall back and prune modes if it's eDP */
+	if (!intel_dp_is_edp(intel_dp)) {
+		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] Link Training failed at link rate = %d, lane count = %d",
+			      intel_connector->base.base.id,
+			      intel_connector->base.name,
+			      intel_dp->link_rate, intel_dp->lane_count);
+		if (!intel_dp_get_link_train_fallback_values(intel_dp,
+							     intel_dp->link_rate,
+							     intel_dp->lane_count))
+			/* Schedule a Hotplug Uevent to userspace to start modeset */
+			schedule_work(&intel_connector->modeset_retry_work);
+	} else {
+		DRM_ERROR("[CONNECTOR:%d:%s] Link Training failed at link rate = %d, lane count = %d",
+			  intel_connector->base.base.id,
+			  intel_connector->base.name,
+			  intel_dp->link_rate, intel_dp->lane_count);
+	}
 	return;
 }
@@ -1951,8 +1951,22 @@ int intel_enable_engine_stats(struct intel_engine_cs *engine)
 	spin_lock_irqsave(&engine->stats.lock, flags);
 	if (engine->stats.enabled == ~0)
 		goto busy;
-	if (engine->stats.enabled++ == 0)
+	if (engine->stats.enabled++ == 0) {
+		struct intel_engine_execlists *execlists = &engine->execlists;
+		const struct execlist_port *port = execlists->port;
+		unsigned int num_ports = execlists_num_ports(execlists);
+
 		engine->stats.enabled_at = ktime_get();
+
+		/* XXX submission method oblivious? */
+		while (num_ports-- && port_isset(port)) {
+			engine->stats.active++;
+			port++;
+		}
+
+		if (engine->stats.active)
+			engine->stats.start = engine->stats.enabled_at;
+	}
 	spin_unlock_irqrestore(&engine->stats.lock, flags);
 
 	return 0;
......
@@ -39,9 +39,6 @@
 #define KBL_FW_MAJOR 9
 #define KBL_FW_MINOR 39
 
-#define GLK_FW_MAJOR 10
-#define GLK_FW_MINOR 56
-
 #define GUC_FW_PATH(platform, major, minor) \
 	"i915/" __stringify(platform) "_guc_ver" __stringify(major) "_" __stringify(minor) ".bin"
@@ -54,8 +51,6 @@ MODULE_FIRMWARE(I915_BXT_GUC_UCODE);
 #define I915_KBL_GUC_UCODE GUC_FW_PATH(kbl, KBL_FW_MAJOR, KBL_FW_MINOR)
 MODULE_FIRMWARE(I915_KBL_GUC_UCODE);
 
-#define I915_GLK_GUC_UCODE GUC_FW_PATH(glk, GLK_FW_MAJOR, GLK_FW_MINOR)
-
 static void guc_fw_select(struct intel_uc_fw *guc_fw)
 {
 	struct intel_guc *guc = container_of(guc_fw, struct intel_guc, fw);
@@ -82,10 +77,6 @@ static void guc_fw_select(struct intel_uc_fw *guc_fw)
 		guc_fw->path = I915_KBL_GUC_UCODE;
 		guc_fw->major_ver_wanted = KBL_FW_MAJOR;
 		guc_fw->minor_ver_wanted = KBL_FW_MINOR;
-	} else if (IS_GEMINILAKE(dev_priv)) {
-		guc_fw->path = I915_GLK_GUC_UCODE;
-		guc_fw->major_ver_wanted = GLK_FW_MAJOR;
-		guc_fw->minor_ver_wanted = GLK_FW_MINOR;
 	} else {
 		DRM_WARN("%s: No firmware known for this platform!\n",
 			 intel_uc_fw_type_repr(guc_fw->type));
......
@@ -411,7 +411,6 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
 	struct intel_engine_cs *engine;
 	enum intel_engine_id id;
 	unsigned int hung = 0, stuck = 0;
-	int busy_count = 0;
 
 	if (!i915_modparams.enable_hangcheck)
 		return;
@@ -429,7 +428,6 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
 	intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
 
 	for_each_engine(engine, dev_priv, id) {
-		const bool busy = intel_engine_has_waiter(engine);
 		struct intel_engine_hangcheck hc;
 
 		semaphore_clear_deadlocks(dev_priv);
@@ -443,16 +441,13 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
 			if (hc.action != ENGINE_DEAD)
 				stuck |= intel_engine_flag(engine);
 		}
-
-		busy_count += busy;
 	}
 
 	if (hung)
 		hangcheck_declare_hang(dev_priv, hung, stuck);
 
 	/* Reset timer in case GPU hangs without another request being added */
-	if (busy_count)
-		i915_queue_hangcheck(dev_priv);
+	i915_queue_hangcheck(dev_priv);
 }
 
 void intel_engine_init_hangcheck(struct intel_engine_cs *engine)
......
@@ -1595,12 +1595,20 @@ intel_hdmi_set_edid(struct drm_connector *connector)
 	struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
 	struct edid *edid;
 	bool connected = false;
+	struct i2c_adapter *i2c;
 
 	intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
 
-	edid = drm_get_edid(connector,
-			    intel_gmbus_get_adapter(dev_priv,
-						    intel_hdmi->ddc_bus));
+	i2c = intel_gmbus_get_adapter(dev_priv, intel_hdmi->ddc_bus);
+
+	edid = drm_get_edid(connector, i2c);
+
+	if (!edid && !intel_gmbus_is_forced_bit(i2c)) {
+		DRM_DEBUG_KMS("HDMI GMBUS EDID read failed, retry using GPIO bit-banging\n");
+		intel_gmbus_force_bit(i2c, true);
+		edid = drm_get_edid(connector, i2c);
+		intel_gmbus_force_bit(i2c, false);
+	}
 
 	intel_hdmi_dp_dual_mode_detect(connector, edid != NULL);
......
@@ -54,10 +54,6 @@
 #define KBL_HUC_FW_MINOR 00
 #define KBL_BLD_NUM 1810
 
-#define GLK_HUC_FW_MAJOR 02
-#define GLK_HUC_FW_MINOR 00
-#define GLK_BLD_NUM 1748
-
 #define HUC_FW_PATH(platform, major, minor, bld_num) \
 	"i915/" __stringify(platform) "_huc_ver" __stringify(major) "_" \
 	__stringify(minor) "_" __stringify(bld_num) ".bin"
@@ -74,9 +70,6 @@ MODULE_FIRMWARE(I915_BXT_HUC_UCODE);
 				     KBL_HUC_FW_MINOR, KBL_BLD_NUM)
 MODULE_FIRMWARE(I915_KBL_HUC_UCODE);
 
-#define I915_GLK_HUC_UCODE HUC_FW_PATH(glk, GLK_HUC_FW_MAJOR, \
-				       GLK_HUC_FW_MINOR, GLK_BLD_NUM)
-
 static void huc_fw_select(struct intel_uc_fw *huc_fw)
 {
 	struct intel_huc *huc = container_of(huc_fw, struct intel_huc, fw);
@@ -103,10 +96,6 @@ static void huc_fw_select(struct intel_uc_fw *huc_fw)
 		huc_fw->path = I915_KBL_HUC_UCODE;
 		huc_fw->major_ver_wanted = KBL_HUC_FW_MAJOR;
 		huc_fw->minor_ver_wanted = KBL_HUC_FW_MINOR;
-	} else if (IS_GEMINILAKE(dev_priv)) {
-		huc_fw->path = I915_GLK_HUC_UCODE;
-		huc_fw->major_ver_wanted = GLK_HUC_FW_MAJOR;
-		huc_fw->minor_ver_wanted = GLK_HUC_FW_MINOR;
 	} else {
 		DRM_WARN("%s: No firmware known for this platform!\n",
 			 intel_uc_fw_type_repr(huc_fw->type));
......
@@ -209,8 +209,6 @@ void intel_uc_fini_wq(struct drm_i915_private *dev_priv)
 	if (!USES_GUC(dev_priv))
 		return;
 
-	GEM_BUG_ON(!HAS_GUC(dev_priv));
-
 	intel_guc_fini_wq(&dev_priv->guc);
 }
......