Commit 69d3e5a5 authored by Linus Torvalds

Merge tag 'drm-fixes-2020-03-20' of git://anongit.freedesktop.org/drm/drm

Pull drm fixes from Dave Airlie:
 "Hope you are well hiding out above the garage. A few amdgpu changes
  but nothing too major. I've had a wisdom tooth out this week so
  haven't been too on top of things, but all seems good.

  core:
   - fix lease warning

  i915:
   - Track active elements during dequeue
   - Fix failure to handle all MCR ranges
   - Revert unnecessary workaround

  amdgpu:
   - Pageflip fix
   - VCN clockgating fixes
   - GPR debugfs fix for umr
   - GPU reset fix
   - eDP fix for MBP
   - DCN2.x fix

  dw-hdmi:
   - fix AVI frame colorimetry

  komeda:
   - fix compiler warning

  bochs:
   - downgrade a binding failure to a warning"

* tag 'drm-fixes-2020-03-20' of git://anongit.freedesktop.org/drm/drm:
  drm/amd/display: Fix pageflip event race condition for DCN.
  drm/amdgpu: fix typo for vcn2.5/jpeg2.5 idle check
  drm/amdgpu: fix typo for vcn2/jpeg2 idle check
  drm/amdgpu: fix typo for vcn1 idle check
  drm/lease: fix WARNING in idr_destroy
  drm/i915: Handle all MCR ranges
  Revert "drm/i915/tgl: Add extra hdc flush workaround"
  drm/i915/execlists: Track active elements during dequeue
  drm/bochs: downgrade pci_request_region failure from error to warning
  drm/amd/display: Add link_rate quirk for Apple 15" MBP 2017
  drm/amdgpu: add fbdev suspend/resume on gpu reset
  drm/amd/amdgpu: Fix GPR read from debugfs (v2)
  drm/amd/display: fix typos for dcn20_funcs and dcn21_funcs struct
  drm/komeda: mark PM functions as __maybe_unused
  drm/bridge: dw-hdmi: fix AVI frame colorimetry
parents 6c90b86a 5366b96b
@@ -781,11 +781,11 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
 	ssize_t result = 0;
 	uint32_t offset, se, sh, cu, wave, simd, thread, bank, *data;

-	if (size & 3 || *pos & 3)
+	if (size > 4096 || size & 3 || *pos & 3)
 		return -EINVAL;

 	/* decode offset */
-	offset = *pos & GENMASK_ULL(11, 0);
+	offset = (*pos & GENMASK_ULL(11, 0)) >> 2;
 	se = (*pos & GENMASK_ULL(19, 12)) >> 12;
 	sh = (*pos & GENMASK_ULL(27, 20)) >> 20;
 	cu = (*pos & GENMASK_ULL(35, 28)) >> 28;
@@ -823,7 +823,7 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
 	while (size) {
 		uint32_t value;

-		value = data[offset++];
+		value = data[result >> 2];
 		r = put_user(value, (uint32_t *)buf);
 		if (r) {
 			result = r;
...
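For context on the umr fix above: the debugfs file position packs register-select fields, and the fix both bounds the read at 4096 bytes and turns the low 12 bits into a dword index (the old code indexed the copied data with a raw byte offset and incremented it per word, walking past the intended GPRs). A minimal sketch of how such a position is packed, using only the masks visible in this hunk; gpr_pos is an illustrative helper, not a driver API, and the fields above cu are omitted:

#include <stdint.h>
#include <stdio.h>

/* Illustrative packer for the fields decoded above: byte offset in
 * bits 11:0, se in 19:12, sh in 27:20, cu in 35:28. */
static uint64_t gpr_pos(uint64_t byte_offset, uint64_t se, uint64_t sh,
                        uint64_t cu)
{
    return (byte_offset & 0xfffULL) |
           ((se & 0xffULL) << 12) |
           ((sh & 0xffULL) << 20) |
           ((cu & 0xffULL) << 28);
}

int main(void)
{
    /* Byte offset 16 becomes dword index 4 after the fixed ">> 2". */
    uint64_t pos = gpr_pos(16, 1, 0, 2);

    printf("pos = 0x%llx, dword index = %llu\n",
           (unsigned long long)pos,
           (unsigned long long)((pos & 0xfff) >> 2));
    return 0;
}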
@@ -3913,6 +3913,8 @@ static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
 				if (r)
 					goto out;

+				amdgpu_fbdev_set_suspend(tmp_adev, 0);
+
 				/* must succeed. */
 				amdgpu_ras_resume(tmp_adev);
@@ -4086,6 +4088,8 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
 		 */
 		amdgpu_unregister_gpu_instance(tmp_adev);

+		amdgpu_fbdev_set_suspend(adev, 1);
+
 		/* disable ras on ALL IPs */
 		if (!(in_ras_intr && !use_baco) &&
 		      amdgpu_device_ip_need_full_reset(tmp_adev))
...
@@ -693,7 +693,7 @@ static int jpeg_v2_0_set_clockgating_state(void *handle,
 	bool enable = (state == AMD_CG_STATE_GATE);

 	if (enable) {
-		if (jpeg_v2_0_is_idle(handle))
+		if (!jpeg_v2_0_is_idle(handle))
 			return -EBUSY;
 		jpeg_v2_0_enable_clock_gating(adev);
 	} else {
...
@@ -477,7 +477,7 @@ static int jpeg_v2_5_set_clockgating_state(void *handle,
 			continue;

 		if (enable) {
-			if (jpeg_v2_5_is_idle(handle))
+			if (!jpeg_v2_5_is_idle(handle))
 				return -EBUSY;
 			jpeg_v2_5_enable_clock_gating(adev, i);
 		} else {
...
@@ -1352,7 +1352,7 @@ static int vcn_v1_0_set_clockgating_state(void *handle,

 	if (enable) {
 		/* wait for STATUS to clear */
-		if (vcn_v1_0_is_idle(handle))
+		if (!vcn_v1_0_is_idle(handle))
 			return -EBUSY;
 		vcn_v1_0_enable_clock_gating(adev);
 	} else {
...
@@ -1217,7 +1217,7 @@ static int vcn_v2_0_set_clockgating_state(void *handle,

 	if (enable) {
 		/* wait for STATUS to clear */
-		if (vcn_v2_0_is_idle(handle))
+		if (!vcn_v2_0_is_idle(handle))
 			return -EBUSY;
 		vcn_v2_0_enable_clock_gating(adev);
 	} else {
...
@@ -1672,7 +1672,7 @@ static int vcn_v2_5_set_clockgating_state(void *handle,
 		return 0;

 	if (enable) {
-		if (vcn_v2_5_is_idle(handle))
+		if (!vcn_v2_5_is_idle(handle))
 			return -EBUSY;
 		vcn_v2_5_enable_clock_gating(adev);
 	} else {
...
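All five typo fixes above restore the same guard: clock gating may only be enabled once the block reports idle, and the missing "!" had inverted that, returning -EBUSY precisely when the block was idle. A distilled sketch of the corrected pattern; the block_* names are illustrative stand-ins for the per-IP hooks, not driver functions:

#include <errno.h>
#include <stdbool.h>

/* Illustrative stand-ins for the per-IP is_idle/enable/disable hooks. */
static bool block_is_idle(void *handle) { (void)handle; return true; }
static void block_enable_clock_gating(void *adev) { (void)adev; }
static void block_disable_clock_gating(void *adev) { (void)adev; }

static int set_clockgating(void *handle, void *adev, bool enable)
{
    if (enable) {
        /* Gate only a quiesced block; the typo dropped this '!'. */
        if (!block_is_idle(handle))
            return -EBUSY;
        block_enable_clock_gating(adev);
    } else {
        block_disable_clock_gating(adev);
    }
    return 0;
}

int main(void)
{
    return set_clockgating(0, 0, true);
}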
@@ -522,8 +522,9 @@ static void dm_dcn_crtc_high_irq(void *interrupt_params)

 	acrtc_state = to_dm_crtc_state(acrtc->base.state);

-	DRM_DEBUG_DRIVER("crtc:%d, vupdate-vrr:%d\n", acrtc->crtc_id,
-			 amdgpu_dm_vrr_active(acrtc_state));
+	DRM_DEBUG_DRIVER("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
+			 amdgpu_dm_vrr_active(acrtc_state),
+			 acrtc_state->active_planes);

 	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
 	drm_crtc_handle_vblank(&acrtc->base);
@@ -543,7 +544,18 @@ static void dm_dcn_crtc_high_irq(void *interrupt_params)
 				&acrtc_state->vrr_params.adjust);
 	}

-	if (acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED) {
+	/*
+	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
+	 * In that case, pageflip completion interrupts won't fire and pageflip
+	 * completion events won't get delivered. Prevent this by sending
+	 * pending pageflip events from here if a flip is still pending.
+	 *
+	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
+	 * avoid race conditions between flip programming and completion,
+	 * which could cause too early flip completion events.
+	 */
+	if (acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
+	    acrtc_state->active_planes == 0) {
 		if (acrtc->event) {
 			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
 			acrtc->event = NULL;
...
@@ -3401,6 +3401,17 @@ static bool retrieve_link_cap(struct dc_link *link)
 			sink_id.ieee_device_id,
 			sizeof(sink_id.ieee_device_id));

+	/* Quirk Apple MBP 2017 15" Retina panel: Wrong DP_MAX_LINK_RATE */
+	{
+		uint8_t str_mbp_2017[] = { 101, 68, 21, 101, 98, 97 };
+
+		if ((link->dpcd_caps.sink_dev_id == 0x0010fa) &&
+		    !memcmp(link->dpcd_caps.sink_dev_id_str, str_mbp_2017,
+			    sizeof(str_mbp_2017))) {
+			link->reported_link_cap.link_rate = 0x0c;
+		}
+	}
+
 	core_link_read_dpcd(
 		link,
 		DP_SINK_HW_REVISION_START,
...
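For reference on the quirk value: DPCD expresses link rates in multiples of 0.27 Gbps per lane, so the pinned 0x0c corresponds to 12 x 0.27 = 3.24 Gbps, an intermediate rate this panel handles even though it misreports DP_MAX_LINK_RATE. A one-liner to sanity-check the conversion:

#include <stdio.h>

int main(void)
{
    unsigned int code = 0x0c;	/* value written to reported_link_cap */

    /* DPCD link-rate codes are multiples of 0.27 Gbps per lane. */
    printf("0x%02x -> %.2f Gbps\n", code, code * 0.27);
    return 0;
}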
@@ -108,7 +108,6 @@ static const struct hwseq_private_funcs dcn20_private_funcs = {
 	.enable_power_gating_plane = dcn20_enable_power_gating_plane,
 	.dpp_pg_control = dcn20_dpp_pg_control,
 	.hubp_pg_control = dcn20_hubp_pg_control,
-	.dsc_pg_control = NULL,
 	.update_odm = dcn20_update_odm,
 	.dsc_pg_control = dcn20_dsc_pg_control,
 	.get_surface_visual_confirm_color = dcn10_get_surface_visual_confirm_color,
...
@@ -116,7 +116,6 @@ static const struct hwseq_private_funcs dcn21_private_funcs = {
 	.enable_power_gating_plane = dcn20_enable_power_gating_plane,
 	.dpp_pg_control = dcn20_dpp_pg_control,
 	.hubp_pg_control = dcn20_hubp_pg_control,
-	.dsc_pg_control = NULL,
 	.update_odm = dcn20_update_odm,
 	.dsc_pg_control = dcn20_dsc_pg_control,
 	.get_surface_visual_confirm_color = dcn10_get_surface_visual_confirm_color,
...
@@ -146,14 +146,14 @@ static const struct of_device_id komeda_of_match[] = {

 MODULE_DEVICE_TABLE(of, komeda_of_match);

-static int komeda_rt_pm_suspend(struct device *dev)
+static int __maybe_unused komeda_rt_pm_suspend(struct device *dev)
 {
 	struct komeda_drv *mdrv = dev_get_drvdata(dev);

 	return komeda_dev_suspend(mdrv->mdev);
 }

-static int komeda_rt_pm_resume(struct device *dev)
+static int __maybe_unused komeda_rt_pm_resume(struct device *dev)
 {
 	struct komeda_drv *mdrv = dev_get_drvdata(dev);

...
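The komeda warning arises because these PM callbacks are referenced only through the dev_pm_ops setup macros, which expand to nothing when the corresponding CONFIG_PM options are disabled; the functions are then defined but unreferenced, and __maybe_unused silences -Wunused-function without #ifdef blocks. A compressed sketch of the mechanism; the two macros here are simplified mock-ups, not the kernel definitions in linux/pm.h (the real SET_RUNTIME_PM_OPS also takes an idle callback):

/* Simplified mock-up of the kernel's pattern. CONFIG_PM is left
 * undefined here, so the macro expands to nothing, which is exactly
 * the case that used to warn. */
#ifdef CONFIG_PM
#define SET_RUNTIME_PM_OPS(suspend_fn, resume_fn) \
    .runtime_suspend = (suspend_fn), .runtime_resume = (resume_fn),
#else
#define SET_RUNTIME_PM_OPS(suspend_fn, resume_fn)	/* expands to nothing */
#endif

#define __maybe_unused __attribute__((__unused__))

struct dev_pm_ops {
    int (*runtime_suspend)(void);
    int (*runtime_resume)(void);
};

/* Without __maybe_unused, building with CONFIG_PM unset leaves these
 * two definitions unreferenced and -Wunused-function fires. */
static int __maybe_unused rt_pm_suspend(void) { return 0; }
static int __maybe_unused rt_pm_resume(void) { return 0; }

static const struct dev_pm_ops pm_ops = {
    SET_RUNTIME_PM_OPS(rt_pm_suspend, rt_pm_resume)
    /* empty initializer list needs GNU C or C23 when PM is off */
};

int main(void)
{
    return pm_ops.runtime_suspend ? pm_ops.runtime_suspend() : 0;
}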
@@ -156,10 +156,8 @@ int bochs_hw_init(struct drm_device *dev)
 		size = min(size, mem);
 	}

-	if (pci_request_region(pdev, 0, "bochs-drm") != 0) {
-		DRM_ERROR("Cannot request framebuffer\n");
-		return -EBUSY;
-	}
+	if (pci_request_region(pdev, 0, "bochs-drm") != 0)
+		DRM_WARN("Cannot request framebuffer, boot fb still active?\n");

 	bochs->fb_map = ioremap(addr, size);
 	if (bochs->fb_map == NULL) {
...
@@ -1624,28 +1624,34 @@ static void hdmi_config_AVI(struct dw_hdmi *hdmi, struct drm_display_mode *mode)
 		frame.colorspace = HDMI_COLORSPACE_RGB;

 	/* Set up colorimetry */
-	switch (hdmi->hdmi_data.enc_out_encoding) {
-	case V4L2_YCBCR_ENC_601:
-		if (hdmi->hdmi_data.enc_in_encoding == V4L2_YCBCR_ENC_XV601)
-			frame.colorimetry = HDMI_COLORIMETRY_EXTENDED;
-		else
-			frame.colorimetry = HDMI_COLORIMETRY_ITU_601;
-		frame.extended_colorimetry =
-				HDMI_EXTENDED_COLORIMETRY_XV_YCC_601;
-		break;
-	case V4L2_YCBCR_ENC_709:
-		if (hdmi->hdmi_data.enc_in_encoding == V4L2_YCBCR_ENC_XV709)
-			frame.colorimetry = HDMI_COLORIMETRY_EXTENDED;
-		else
-			frame.colorimetry = HDMI_COLORIMETRY_ITU_709;
-		frame.extended_colorimetry =
-				HDMI_EXTENDED_COLORIMETRY_XV_YCC_709;
-		break;
-	default: /* Carries no data */
-		frame.colorimetry = HDMI_COLORIMETRY_ITU_601;
-		frame.extended_colorimetry =
-				HDMI_EXTENDED_COLORIMETRY_XV_YCC_601;
-		break;
+	if (!hdmi_bus_fmt_is_rgb(hdmi->hdmi_data.enc_out_bus_format)) {
+		switch (hdmi->hdmi_data.enc_out_encoding) {
+		case V4L2_YCBCR_ENC_601:
+			if (hdmi->hdmi_data.enc_in_encoding == V4L2_YCBCR_ENC_XV601)
+				frame.colorimetry = HDMI_COLORIMETRY_EXTENDED;
+			else
+				frame.colorimetry = HDMI_COLORIMETRY_ITU_601;
+			frame.extended_colorimetry =
+					HDMI_EXTENDED_COLORIMETRY_XV_YCC_601;
+			break;
+		case V4L2_YCBCR_ENC_709:
+			if (hdmi->hdmi_data.enc_in_encoding == V4L2_YCBCR_ENC_XV709)
+				frame.colorimetry = HDMI_COLORIMETRY_EXTENDED;
+			else
+				frame.colorimetry = HDMI_COLORIMETRY_ITU_709;
+			frame.extended_colorimetry =
+					HDMI_EXTENDED_COLORIMETRY_XV_YCC_709;
+			break;
+		default: /* Carries no data */
+			frame.colorimetry = HDMI_COLORIMETRY_ITU_601;
+			frame.extended_colorimetry =
+					HDMI_EXTENDED_COLORIMETRY_XV_YCC_601;
+			break;
+		}
+	} else {
+		frame.colorimetry = HDMI_COLORIMETRY_NONE;
+		frame.extended_colorimetry =
+			HDMI_EXTENDED_COLORIMETRY_XV_YCC_601;
 	}

 	frame.scan_mode = HDMI_SCAN_MODE_NONE;
...
@@ -542,10 +542,12 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev,
 	}

 	DRM_DEBUG_LEASE("Creating lease\n");
+	/* lessee will take the ownership of leases */
 	lessee = drm_lease_create(lessor, &leases);

 	if (IS_ERR(lessee)) {
 		ret = PTR_ERR(lessee);
+		idr_destroy(&leases);
 		goto out_leases;
 	}

@@ -580,7 +582,6 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev,

 out_leases:
 	put_unused_fd(fd);
-	idr_destroy(&leases);

 	DRM_DEBUG_LEASE("drm_mode_create_lease_ioctl failed: %d\n", ret);
 	return ret;
...
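The lease fix encodes an ownership handoff: once drm_lease_create() succeeds, the lessee owns the leases idr and its own teardown destroys it, so the shared out_leases error label must no longer call idr_destroy() (that duplicate destroy is what tripped the WARNING); only the branch where creation itself fails still owns the idr and destroys it inline. A distilled sketch of the rule under illustrative names; resource/consumer are not DRM APIs:

#include <stdlib.h>

struct resource { void *payload; };
struct consumer { struct resource *res; };

/* On success the consumer takes ownership of 'res'; on failure
 * ownership stays with the caller. */
static struct consumer *consumer_create(struct resource *res)
{
    struct consumer *c = malloc(sizeof(*c));

    if (c)
        c->res = res;
    return c;
}

static void resource_release(struct resource *res)
{
    free(res->payload);
    res->payload = NULL;
}

/* The consumer's teardown releases the resource exactly once. */
static void consumer_destroy(struct consumer *c)
{
    resource_release(c->res);
    free(c);
}

static int attach(struct resource *res)
{
    struct consumer *c = consumer_create(res);

    if (!c) {
        resource_release(res);	/* create failed: we still own res */
        return -1;
    }
    /*
     * From here on, only consumer_destroy() may release res; releasing
     * it again on a shared error path is the analogue of the duplicate
     * idr_destroy() this fix removes.
     */
    consumer_destroy(c);
    return 0;
}

int main(void)
{
    struct resource res = { .payload = malloc(16) };

    return res.payload ? attach(&res) : 1;
}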
@@ -1600,17 +1600,6 @@ static void virtual_xfer_breadcrumbs(struct virtual_engine *ve,
 	spin_unlock(&old->breadcrumbs.irq_lock);
 }

-static struct i915_request *
-last_active(const struct intel_engine_execlists *execlists)
-{
-	struct i915_request * const *last = READ_ONCE(execlists->active);
-
-	while (*last && i915_request_completed(*last))
-		last++;
-
-	return *last;
-}
-
 #define for_each_waiter(p__, rq__) \
 	list_for_each_entry_lockless(p__, \
 				     &(rq__)->sched.waiters_list, \
@@ -1740,11 +1729,9 @@ static void record_preemption(struct intel_engine_execlists *execlists)
 	(void)I915_SELFTEST_ONLY(execlists->preempt_hang.count++);
 }

-static unsigned long active_preempt_timeout(struct intel_engine_cs *engine)
+static unsigned long active_preempt_timeout(struct intel_engine_cs *engine,
+					    const struct i915_request *rq)
 {
-	struct i915_request *rq;
-
-	rq = last_active(&engine->execlists);
 	if (!rq)
 		return 0;

@@ -1755,13 +1742,14 @@ static unsigned long active_preempt_timeout(struct intel_engine_cs *engine,
 	return READ_ONCE(engine->props.preempt_timeout_ms);
 }

-static void set_preempt_timeout(struct intel_engine_cs *engine)
+static void set_preempt_timeout(struct intel_engine_cs *engine,
+				const struct i915_request *rq)
 {
 	if (!intel_engine_has_preempt_reset(engine))
 		return;

 	set_timer_ms(&engine->execlists.preempt,
-		     active_preempt_timeout(engine));
+		     active_preempt_timeout(engine, rq));
 }

 static inline void clear_ports(struct i915_request **ports, int count)
@@ -1774,6 +1762,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 	struct intel_engine_execlists * const execlists = &engine->execlists;
 	struct i915_request **port = execlists->pending;
 	struct i915_request ** const last_port = port + execlists->port_mask;
+	struct i915_request * const *active;
 	struct i915_request *last;
 	struct rb_node *rb;
 	bool submit = false;
@@ -1828,7 +1817,10 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 	 * i.e. we will retrigger preemption following the ack in case
 	 * of trouble.
 	 */
-	last = last_active(execlists);
+	active = READ_ONCE(execlists->active);
+	while ((last = *active) && i915_request_completed(last))
+		active++;
+
 	if (last) {
 		if (need_preempt(engine, last, rb)) {
 			ENGINE_TRACE(engine,
@@ -2110,7 +2102,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 	 * Skip if we ended up with exactly the same set of requests,
 	 * e.g. trying to timeslice a pair of ordered contexts
 	 */
-	if (!memcmp(execlists->active, execlists->pending,
+	if (!memcmp(active, execlists->pending,
 		    (port - execlists->pending + 1) * sizeof(*port))) {
 		do
 			execlists_schedule_out(fetch_and_zero(port));
@@ -2121,7 +2113,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 		clear_ports(port + 1, last_port - port);

 		execlists_submit_ports(engine);
-		set_preempt_timeout(engine);
+		set_preempt_timeout(engine, *active);
 	} else {
skip_submit:
 		ring_set_paused(engine, 0);
@@ -4008,26 +4000,6 @@ static int gen12_emit_flush_render(struct i915_request *request,
 		*cs++ = preparser_disable(false);

 		intel_ring_advance(request, cs);
-
-		/*
-		 * Wa_1604544889:tgl
-		 */
-		if (IS_TGL_REVID(request->i915, TGL_REVID_A0, TGL_REVID_A0)) {
-			flags = 0;
-			flags |= PIPE_CONTROL_CS_STALL;
-			flags |= PIPE_CONTROL_HDC_PIPELINE_FLUSH;
-			flags |= PIPE_CONTROL_STORE_DATA_INDEX;
-			flags |= PIPE_CONTROL_QW_WRITE;
-
-			cs = intel_ring_begin(request, 6);
-			if (IS_ERR(cs))
-				return PTR_ERR(cs);
-
-			cs = gen8_emit_pipe_control(cs, flags,
-						    LRC_PPHWSP_SCRATCH_ADDR);
-			intel_ring_advance(request, cs);
-		}
 	}

 	return 0;
...
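The dequeue rework earlier in this file replaces the last_active() helper with a single READ_ONCE() snapshot of execlists->active taken inside execlists_dequeue(), so the same cursor feeds the completed-request skip, the memcmp against the pending set, and the preempt-timeout request, instead of being re-derived while the submission state may move underneath. A standalone sketch of that snapshot-then-reuse pattern in plain C with illustrative types:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct request { int id; bool completed; };

int main(void)
{
    struct request r0 = { .id = 0, .completed = true };
    struct request r1 = { .id = 1, .completed = false };
    struct request *queue[] = { &r0, &r1, NULL };

    /* One snapshot of the active cursor (stand-in for
     * READ_ONCE(execlists->active))... */
    struct request **active = queue;
    struct request *last;

    /* ...advanced once past completed requests... */
    while ((last = *active) && last->completed)
        active++;

    /* ...and every later consumer reuses 'active'/'last' rather than
     * re-reading the shared pointer. */
    printf("last active: %d\n", last ? last->id : -1);
    return 0;
}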
@@ -1529,15 +1529,34 @@ create_scratch(struct i915_address_space *vm, int count)
 	return ERR_PTR(err);
 }

+static const struct {
+	u32 start;
+	u32 end;
+} mcr_ranges_gen8[] = {
+	{ .start = 0x5500, .end = 0x55ff },
+	{ .start = 0x7000, .end = 0x7fff },
+	{ .start = 0x9400, .end = 0x97ff },
+	{ .start = 0xb000, .end = 0xb3ff },
+	{ .start = 0xe000, .end = 0xe7ff },
+	{},
+};
+
 static bool mcr_range(struct drm_i915_private *i915, u32 offset)
 {
+	int i;
+
+	if (INTEL_GEN(i915) < 8)
+		return false;
+
 	/*
-	 * Registers in this range are affected by the MCR selector
+	 * Registers in these ranges are affected by the MCR selector
 	 * which only controls CPU initiated MMIO. Routing does not
 	 * work for CS access so we cannot verify them on this path.
 	 */
-	if (INTEL_GEN(i915) >= 8 && (offset >= 0xb000 && offset <= 0xb4ff))
-		return true;
+	for (i = 0; mcr_ranges_gen8[i].start; i++)
+		if (offset >= mcr_ranges_gen8[i].start &&
+		    offset <= mcr_ranges_gen8[i].end)
+			return true;

 	return false;
 }
...
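The rewritten mcr_range() walks a sentinel-terminated table: the trailing {} entry zero-initializes .start, which ends the loop. A standalone mirror of the lookup, including a probe at 0xb400, which the old single-range check (0xb000-0xb4ff) wrongly treated as an MCR register:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Standalone mirror of the new sentinel-terminated range walk. */
static const struct { uint32_t start, end; } ranges[] = {
    { 0x5500, 0x55ff },
    { 0x7000, 0x7fff },
    { 0x9400, 0x97ff },
    { 0xb000, 0xb3ff },
    { 0xe000, 0xe7ff },
    { 0, 0 },	/* sentinel: .start == 0 ends the walk */
};

static bool in_mcr_range(uint32_t offset)
{
    for (int i = 0; ranges[i].start; i++)
        if (offset >= ranges[i].start && offset <= ranges[i].end)
            return true;
    return false;
}

int main(void)
{
    /* 0x9600 is covered; 0xb400 no longer is under the tightened
     * 0xb000-0xb3ff entry. */
    printf("%d %d\n", in_mcr_range(0x9600), in_mcr_range(0xb400));
    return 0;
}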