Commit 27db6f7b authored by Dave Airlie

Merge tag 'drm-intel-fixes-2020-05-13-1' of git://anongit.freedesktop.org/drm/drm-intel into drm-fixes

- Handle idling during i915_gem_evict_something busy loops (Chris)
- Mark current submissions with a weak-dependency (Chris)
- Propagate error from completed fences (Chris)
- Fixes on execlists to avoid GPU hang situations (Chris)
- Fix a couple of deadlocks (Chris)
- Timeslice preemption fixes (Chris)
- Fix Display Port interrupt handling on Tiger Lake (Imre)
- Reduce debug noise around Frame Buffer Compression (Peter)
- Fix logic around IPC W/a for Coffee Lake and Kaby Lake (Sultan)
- Avoid dereferencing a dead context (Chris)
Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200514040235.GA2164266@intel.com
parents f59bcda8 955da9d7
@@ -1721,6 +1721,9 @@ static void defer_request(struct i915_request *rq, struct list_head * const pl)
struct i915_request *w =
container_of(p->waiter, typeof(*w), sched);
if (p->flags & I915_DEPENDENCY_WEAK)
continue;
/* Leave semaphores spinning on the other engines */
if (w->engine != rq->engine)
continue;
......
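The flag tested in the hunk above is introduced by this same series: awaits recorded purely for priority inheritance are tagged I915_DEPENDENCY_WEAK (see the i915_request.c and i915_scheduler.c hunks below). Shown side by side for illustration (fragments lifted from this commit, not new API):

/* producer: a submit-fence style await is recorded as a weak edge ... */
err = i915_sched_node_add_dependency(&to->sched, &from->sched,
				     I915_DEPENDENCY_WEAK);

/* ... consumer: defer_request() then skips such edges, so a waiter linked
 * only for priority inheritance is not dragged along when its signaler is
 * deferred for timeslicing. */
if (p->flags & I915_DEPENDENCY_WEAK)
	continue;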
@@ -208,14 +208,41 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
SKL_FUSE_PG_DIST_STATUS(SKL_PG0) |
SKL_FUSE_PG_DIST_STATUS(SKL_PG1) |
SKL_FUSE_PG_DIST_STATUS(SKL_PG2);
vgpu_vreg_t(vgpu, LCPLL1_CTL) |=
LCPLL_PLL_ENABLE |
LCPLL_PLL_LOCK;
vgpu_vreg_t(vgpu, LCPLL2_CTL) |= LCPLL_PLL_ENABLE;
/*
* Only 1 PIPE enabled in current vGPU display and PIPE_A is
* tied to TRANSCODER_A in HW, so it's safe to assume PIPE_A,
* TRANSCODER_A can be enabled. PORT_x depends on the input of
* setup_virtual_dp_monitor, we can bind DPLL0 to any PORT_x
* so we fixed to DPLL0 here.
* Setup DPLL0: DP link clk 1620 MHz, non SSC, DP Mode
*/
vgpu_vreg_t(vgpu, DPLL_CTRL1) =
DPLL_CTRL1_OVERRIDE(DPLL_ID_SKL_DPLL0);
vgpu_vreg_t(vgpu, DPLL_CTRL1) |=
DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, DPLL_ID_SKL_DPLL0);
vgpu_vreg_t(vgpu, LCPLL1_CTL) =
LCPLL_PLL_ENABLE | LCPLL_PLL_LOCK;
vgpu_vreg_t(vgpu, DPLL_STATUS) = DPLL_LOCK(DPLL_ID_SKL_DPLL0);
/*
* Golden M/N are calculated based on:
* 24 bpp, 4 lanes, 154000 pixel clk (from virtual EDID),
* DP link clk 1620 MHz and non-constant_n.
* TODO: calculate DP link symbol clk and stream clk m/n.
*/
vgpu_vreg_t(vgpu, PIPE_DATA_M1(TRANSCODER_A)) = 63 << TU_SIZE_SHIFT;
vgpu_vreg_t(vgpu, PIPE_DATA_M1(TRANSCODER_A)) |= 0x5b425e;
vgpu_vreg_t(vgpu, PIPE_DATA_N1(TRANSCODER_A)) = 0x800000;
vgpu_vreg_t(vgpu, PIPE_LINK_M1(TRANSCODER_A)) = 0x3cd6e;
vgpu_vreg_t(vgpu, PIPE_LINK_N1(TRANSCODER_A)) = 0x80000;
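/*
 * Quick cross-check of the golden data M/N above (illustration, not part
 * of the original patch): assuming the standard DP data M/N relation
 *   data_m / data_n = (pixel_clock * bpp) / (link_clock * lanes * 8)
 * the parameters quoted in the comment (pixel clock 154000 kHz, 24 bpp,
 * 4 lanes, 1.62 GHz link rate, i.e. a 162000 kHz link clock) give
 *   (154000 * 24) / (162000 * 4 * 8) ~= 0.71296
 * which matches 0x5b425e / 0x800000 = 5980766 / 8388608 ~= 0.71296.
 */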
}
if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B)) {
vgpu_vreg_t(vgpu, DPLL_CTRL2) &=
~DPLL_CTRL2_DDI_CLK_OFF(PORT_B);
vgpu_vreg_t(vgpu, DPLL_CTRL2) |=
DPLL_CTRL2_DDI_CLK_SEL(DPLL_ID_SKL_DPLL0, PORT_B);
vgpu_vreg_t(vgpu, DPLL_CTRL2) |=
DPLL_CTRL2_DDI_SEL_OVERRIDE(PORT_B);
vgpu_vreg_t(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDIB_DETECTED;
vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) &=
~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK |
@@ -236,6 +263,12 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
}
if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C)) {
vgpu_vreg_t(vgpu, DPLL_CTRL2) &=
~DPLL_CTRL2_DDI_CLK_OFF(PORT_C);
vgpu_vreg_t(vgpu, DPLL_CTRL2) |=
DPLL_CTRL2_DDI_CLK_SEL(DPLL_ID_SKL_DPLL0, PORT_C);
vgpu_vreg_t(vgpu, DPLL_CTRL2) |=
DPLL_CTRL2_DDI_SEL_OVERRIDE(PORT_C);
vgpu_vreg_t(vgpu, SDEISR) |= SDE_PORTC_HOTPLUG_CPT;
vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) &=
~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK |
@@ -256,6 +289,12 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
}
if (intel_vgpu_has_monitor_on_port(vgpu, PORT_D)) {
vgpu_vreg_t(vgpu, DPLL_CTRL2) &=
~DPLL_CTRL2_DDI_CLK_OFF(PORT_D);
vgpu_vreg_t(vgpu, DPLL_CTRL2) |=
DPLL_CTRL2_DDI_CLK_SEL(DPLL_ID_SKL_DPLL0, PORT_D);
vgpu_vreg_t(vgpu, DPLL_CTRL2) |=
DPLL_CTRL2_DDI_SEL_OVERRIDE(PORT_D);
vgpu_vreg_t(vgpu, SDEISR) |= SDE_PORTD_HOTPLUG_CPT;
vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) &=
~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK |
......
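Each port that exposes a virtual monitor gets the same DPLL_CTRL2 programming in the three hunks above; the common pattern, with port standing in for PORT_B/C/D (illustration only):

vgpu_vreg_t(vgpu, DPLL_CTRL2) &=
	~DPLL_CTRL2_DDI_CLK_OFF(port);			/* ungate the DDI clock */
vgpu_vreg_t(vgpu, DPLL_CTRL2) |=
	DPLL_CTRL2_DDI_CLK_SEL(DPLL_ID_SKL_DPLL0, port);	/* source it from DPLL0 */
vgpu_vreg_t(vgpu, DPLL_CTRL2) |=
	DPLL_CTRL2_DDI_SEL_OVERRIDE(port);		/* enable the per-DDI override */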
@@ -379,7 +379,11 @@ static void set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
for (i = 0; i < GVT_RING_CTX_NR_PDPS; i++) {
struct i915_page_directory * const pd =
i915_pd_entry(ppgtt->pd, i);
/* skip now as current i915 ppgtt alloc won't allocate
top level pdp for non 4-level table, won't impact
shadow ppgtt. */
if (!pd)
break;
px_dma(pd) = mm->ppgtt_mm.shadow_pdps[i];
}
}
......
@@ -128,6 +128,13 @@ i915_gem_evict_something(struct i915_address_space *vm,
active = NULL;
INIT_LIST_HEAD(&eviction_list);
list_for_each_entry_safe(vma, next, &vm->bound_list, vm_link) {
if (vma == active) { /* now seen this vma twice */
if (flags & PIN_NONBLOCK)
break;
active = ERR_PTR(-EAGAIN);
}
/*
* We keep this list in a rough least-recently scanned order
* of active elements (inactive elements are cheap to reap).
@@ -143,22 +150,13 @@ i915_gem_evict_something(struct i915_address_space *vm,
* To notice when we complete one full cycle, we record the
* first active element seen, before moving it to the tail.
*/
if (i915_vma_is_active(vma)) {
if (vma == active) {
if (flags & PIN_NONBLOCK)
break;
active = ERR_PTR(-EAGAIN);
}
if (active != ERR_PTR(-EAGAIN)) {
if (active != ERR_PTR(-EAGAIN) && i915_vma_is_active(vma)) {
if (!active)
active = vma;
list_move_tail(&vma->vm_link, &vm->bound_list);
continue;
}
}
if (mark_free(&scan, vma, flags, &eviction_list))
goto found;
......
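Because the removed and replacement lines above are shown interleaved, here is roughly how the scan loop reads once both hunks are applied (a sketch assembled from the hunks, not a verbatim copy of the file):

list_for_each_entry_safe(vma, next, &vm->bound_list, vm_link) {
	if (vma == active) { /* now seen this vma twice */
		if (flags & PIN_NONBLOCK)
			break;

		active = ERR_PTR(-EAGAIN);
	}

	/* rotate active vmas to the tail until one full cycle completes */
	if (active != ERR_PTR(-EAGAIN) && i915_vma_is_active(vma)) {
		if (!active)
			active = vma;

		list_move_tail(&vma->vm_link, &vm->bound_list);
		continue;
	}

	if (mark_free(&scan, vma, flags, &eviction_list))
		goto found;
}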
@@ -1017,11 +1017,15 @@ i915_request_await_request(struct i915_request *to, struct i915_request *from)
GEM_BUG_ON(to == from);
GEM_BUG_ON(to->timeline == from->timeline);
if (i915_request_completed(from))
if (i915_request_completed(from)) {
i915_sw_fence_set_error_once(&to->submit, from->fence.error);
return 0;
}
if (to->engine->schedule) {
ret = i915_sched_node_add_dependency(&to->sched, &from->sched);
ret = i915_sched_node_add_dependency(&to->sched,
&from->sched,
I915_DEPENDENCY_EXTERNAL);
if (ret < 0)
return ret;
}
@@ -1183,7 +1187,9 @@ __i915_request_await_execution(struct i915_request *to,
/* Couple the dependency tree for PI on this exposed to->fence */
if (to->engine->schedule) {
err = i915_sched_node_add_dependency(&to->sched, &from->sched);
err = i915_sched_node_add_dependency(&to->sched,
&from->sched,
I915_DEPENDENCY_WEAK);
if (err < 0)
return err;
}
......
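With the old lines dropped, the two i915_request.c hunks above amount to this: a completed fence still short-circuits the await but now forwards its error, and the two await paths tag their scheduler edges differently (sketch assembled from the hunks):

/* i915_request_await_request(): a real ordering constraint */
if (i915_request_completed(from)) {
	i915_sw_fence_set_error_once(&to->submit, from->fence.error);
	return 0;
}

if (to->engine->schedule) {
	ret = i915_sched_node_add_dependency(&to->sched,
					     &from->sched,
					     I915_DEPENDENCY_EXTERNAL);
	if (ret < 0)
		return ret;
}

/* __i915_request_await_execution(): needed only for priority inheritance */
if (to->engine->schedule) {
	err = i915_sched_node_add_dependency(&to->sched,
					     &from->sched,
					     I915_DEPENDENCY_WEAK);
	if (err < 0)
		return err;
}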
@@ -456,7 +456,8 @@ bool __i915_sched_node_add_dependency(struct i915_sched_node *node,
}
int i915_sched_node_add_dependency(struct i915_sched_node *node,
struct i915_sched_node *signal)
struct i915_sched_node *signal,
unsigned long flags)
{
struct i915_dependency *dep;
@@ -465,8 +466,7 @@ int i915_sched_node_add_dependency(struct i915_sched_node *node,
return -ENOMEM;
if (!__i915_sched_node_add_dependency(node, signal, dep,
I915_DEPENDENCY_EXTERNAL |
I915_DEPENDENCY_ALLOC))
flags | I915_DEPENDENCY_ALLOC))
i915_dependency_free(dep);
return 0;
......
@@ -34,7 +34,8 @@ bool __i915_sched_node_add_dependency(struct i915_sched_node *node,
unsigned long flags);
int i915_sched_node_add_dependency(struct i915_sched_node *node,
struct i915_sched_node *signal);
struct i915_sched_node *signal,
unsigned long flags);
void i915_sched_node_fini(struct i915_sched_node *node);
......
@@ -78,6 +78,7 @@ struct i915_dependency {
unsigned long flags;
#define I915_DEPENDENCY_ALLOC BIT(0)
#define I915_DEPENDENCY_EXTERNAL BIT(1)
#define I915_DEPENDENCY_WEAK BIT(2)
};
#endif /* _I915_SCHEDULER_TYPES_H_ */