Commit d0093404 authored by Dave Airlie's avatar Dave Airlie

Merge tag 'drm-intel-next-2015-05-08' of git://anongit.freedesktop.org/drm-intel into drm-next

- skl plane scaler support (Chandra Kondru)
- enable hsw cmd parser (Daniel, with a fix from Rebecca Palmer)
- skl dc5/6 support (low power display modes) from Suketu & Sunil
- dp compliance testing patches (Todd Previte)
- dp link training optimization (Mika Kahola)
- fixes to make skl resume work (Damien)
- rework modeset code to fully use atomic state objects (Ander & Maarten)
- pile of bxt w/a patches from Nick Hoath
- (linear) partial gtt mmap support (Joonas Lahtinen)

* tag 'drm-intel-next-2015-05-08' of git://anongit.freedesktop.org/drm-intel: (103 commits)
  drm/i915: Update DRIVER_DATE to 20150508
  drm/i915: Only wait for required lanes in vlv_wait_port_ready()
  drm/i915: Fix possible security hole in command parsing
  drm/edid: Kerneldoc for newly added edid_corrupt
  drm/i915: Reject huge tiled objects
  Revert "drm/i915: Hack to tie both common lanes together on chv"
  drm/i915: Work around DISPLAY_PHY_CONTROL register corruption on CHV
  drm/i915: Implement chv display PHY lane stagger setup
  drm/i915/vlv: remove wait for previous GFX clk disable request
  drm/i915: Set crtc_state->active to false when CRTC is disabled (v2)
  drm/i915/skl: Re-indent part of skl_ddi_calculate_wrpll()
  drm/i915: Use partial view in mmap fault handler
  drm/i915: Add a partial GGTT view type
  drm/i915: Consider object pinned if any VMA is pinned
  drm/i915: Do not make assumptions on GGTT VMA sizes
  drm/i915/bxt: Mark WaCcsTlbPrefetchDisable as for Broxton also.
  drm/i915/bxt: Mark WaDisablePartialResolveInVc as for Broxton also.
  drm/i915/bxt: Mark Wa4x4STCOptimizationDisable as for Broxton also.
  drm/i915/bxt: Move WaForceEnableNonCoherent to Skylake only
  drm/i915/bxt: Enable WaEnableYV12BugFixInHalfSliceChicken7 for Broxton
  ...
parents dde10068 214a2b7f
...@@ -280,6 +280,8 @@ mode_fixup(struct drm_atomic_state *state) ...@@ -280,6 +280,8 @@ mode_fixup(struct drm_atomic_state *state)
*/ */
encoder = conn_state->best_encoder; encoder = conn_state->best_encoder;
funcs = encoder->helper_private; funcs = encoder->helper_private;
if (!funcs)
continue;
if (encoder->bridge && encoder->bridge->funcs->mode_fixup) { if (encoder->bridge && encoder->bridge->funcs->mode_fixup) {
ret = encoder->bridge->funcs->mode_fixup( ret = encoder->bridge->funcs->mode_fixup(
...@@ -317,6 +319,9 @@ mode_fixup(struct drm_atomic_state *state) ...@@ -317,6 +319,9 @@ mode_fixup(struct drm_atomic_state *state)
continue; continue;
funcs = crtc->helper_private; funcs = crtc->helper_private;
if (!funcs->mode_fixup)
continue;
ret = funcs->mode_fixup(crtc, &crtc_state->mode, ret = funcs->mode_fixup(crtc, &crtc_state->mode,
&crtc_state->adjusted_mode); &crtc_state->adjusted_mode);
if (!ret) { if (!ret) {
......
...@@ -1041,13 +1041,15 @@ static bool drm_edid_is_zero(const u8 *in_edid, int length) ...@@ -1041,13 +1041,15 @@ static bool drm_edid_is_zero(const u8 *in_edid, int length)
* @raw_edid: pointer to raw EDID block * @raw_edid: pointer to raw EDID block
* @block: type of block to validate (0 for base, extension otherwise) * @block: type of block to validate (0 for base, extension otherwise)
* @print_bad_edid: if true, dump bad EDID blocks to the console * @print_bad_edid: if true, dump bad EDID blocks to the console
* @edid_corrupt: if true, the header or checksum is invalid
* *
* Validate a base or extension EDID block and optionally dump bad blocks to * Validate a base or extension EDID block and optionally dump bad blocks to
* the console. * the console.
* *
* Return: True if the block is valid, false otherwise. * Return: True if the block is valid, false otherwise.
*/ */
bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid) bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid,
bool *edid_corrupt)
{ {
u8 csum; u8 csum;
struct edid *edid = (struct edid *)raw_edid; struct edid *edid = (struct edid *)raw_edid;
...@@ -1060,11 +1062,22 @@ bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid) ...@@ -1060,11 +1062,22 @@ bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid)
if (block == 0) { if (block == 0) {
int score = drm_edid_header_is_valid(raw_edid); int score = drm_edid_header_is_valid(raw_edid);
if (score == 8) ; if (score == 8) {
else if (score >= edid_fixup) { if (edid_corrupt)
*edid_corrupt = false;
} else if (score >= edid_fixup) {
/* Displayport Link CTS Core 1.2 rev1.1 test 4.2.2.6
* The corrupt flag needs to be set here, otherwise the
* fix-up code below will correct the problem, the
* checksum will be correct and the test will fail
*/
if (edid_corrupt)
*edid_corrupt = true;
DRM_DEBUG("Fixing EDID header, your hardware may be failing\n"); DRM_DEBUG("Fixing EDID header, your hardware may be failing\n");
memcpy(raw_edid, edid_header, sizeof(edid_header)); memcpy(raw_edid, edid_header, sizeof(edid_header));
} else { } else {
if (edid_corrupt)
*edid_corrupt = true;
goto bad; goto bad;
} }
} }
...@@ -1075,6 +1088,9 @@ bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid) ...@@ -1075,6 +1088,9 @@ bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid)
DRM_ERROR("EDID checksum is invalid, remainder is %d\n", csum); DRM_ERROR("EDID checksum is invalid, remainder is %d\n", csum);
} }
if (edid_corrupt)
*edid_corrupt = true;
/* allow CEA to slide through, switches mangle this */ /* allow CEA to slide through, switches mangle this */
if (raw_edid[0] != 0x02) if (raw_edid[0] != 0x02)
goto bad; goto bad;
...@@ -1129,7 +1145,7 @@ bool drm_edid_is_valid(struct edid *edid) ...@@ -1129,7 +1145,7 @@ bool drm_edid_is_valid(struct edid *edid)
return false; return false;
for (i = 0; i <= edid->extensions; i++) for (i = 0; i <= edid->extensions; i++)
if (!drm_edid_block_valid(raw + i * EDID_LENGTH, i, true)) if (!drm_edid_block_valid(raw + i * EDID_LENGTH, i, true, NULL))
return false; return false;
return true; return true;
...@@ -1232,7 +1248,8 @@ struct edid *drm_do_get_edid(struct drm_connector *connector, ...@@ -1232,7 +1248,8 @@ struct edid *drm_do_get_edid(struct drm_connector *connector,
for (i = 0; i < 4; i++) { for (i = 0; i < 4; i++) {
if (get_edid_block(data, block, 0, EDID_LENGTH)) if (get_edid_block(data, block, 0, EDID_LENGTH))
goto out; goto out;
if (drm_edid_block_valid(block, 0, print_bad_edid)) if (drm_edid_block_valid(block, 0, print_bad_edid,
&connector->edid_corrupt))
break; break;
if (i == 0 && drm_edid_is_zero(block, EDID_LENGTH)) { if (i == 0 && drm_edid_is_zero(block, EDID_LENGTH)) {
connector->null_edid_counter++; connector->null_edid_counter++;
...@@ -1257,7 +1274,10 @@ struct edid *drm_do_get_edid(struct drm_connector *connector, ...@@ -1257,7 +1274,10 @@ struct edid *drm_do_get_edid(struct drm_connector *connector,
block + (valid_extensions + 1) * EDID_LENGTH, block + (valid_extensions + 1) * EDID_LENGTH,
j, EDID_LENGTH)) j, EDID_LENGTH))
goto out; goto out;
if (drm_edid_block_valid(block + (valid_extensions + 1) * EDID_LENGTH, j, print_bad_edid)) { if (drm_edid_block_valid(block + (valid_extensions + 1)
* EDID_LENGTH, j,
print_bad_edid,
NULL)) {
valid_extensions++; valid_extensions++;
break; break;
} }
......
...@@ -216,7 +216,8 @@ static void *edid_load(struct drm_connector *connector, const char *name, ...@@ -216,7 +216,8 @@ static void *edid_load(struct drm_connector *connector, const char *name,
goto out; goto out;
} }
if (!drm_edid_block_valid(edid, 0, print_bad_edid)) { if (!drm_edid_block_valid(edid, 0, print_bad_edid,
&connector->edid_corrupt)) {
connector->bad_edid_counter++; connector->bad_edid_counter++;
DRM_ERROR("Base block of EDID firmware \"%s\" is invalid ", DRM_ERROR("Base block of EDID firmware \"%s\" is invalid ",
name); name);
...@@ -229,7 +230,9 @@ static void *edid_load(struct drm_connector *connector, const char *name, ...@@ -229,7 +230,9 @@ static void *edid_load(struct drm_connector *connector, const char *name,
if (i != valid_extensions + 1) if (i != valid_extensions + 1)
memcpy(edid + (valid_extensions + 1) * EDID_LENGTH, memcpy(edid + (valid_extensions + 1) * EDID_LENGTH,
edid + i * EDID_LENGTH, EDID_LENGTH); edid + i * EDID_LENGTH, EDID_LENGTH);
if (drm_edid_block_valid(edid + i * EDID_LENGTH, i, print_bad_edid)) if (drm_edid_block_valid(edid + i * EDID_LENGTH, i,
print_bad_edid,
NULL))
valid_extensions++; valid_extensions++;
} }
......
...@@ -12,7 +12,8 @@ i915-y := i915_drv.o \ ...@@ -12,7 +12,8 @@ i915-y := i915_drv.o \
i915_suspend.o \ i915_suspend.o \
i915_sysfs.o \ i915_sysfs.o \
intel_pm.o \ intel_pm.o \
intel_runtime_pm.o intel_runtime_pm.o \
intel_csr.o
i915-$(CONFIG_COMPAT) += i915_ioc32.o i915-$(CONFIG_COMPAT) += i915_ioc32.o
i915-$(CONFIG_DEBUG_FS) += i915_debugfs.o i915-$(CONFIG_DEBUG_FS) += i915_debugfs.o
......
...@@ -1211,12 +1211,17 @@ static int i915_frequency_info(struct seq_file *m, void *unused) ...@@ -1211,12 +1211,17 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
GEN6_CURBSYTAVG_MASK); GEN6_CURBSYTAVG_MASK);
seq_printf(m, "RP PREV UP: %dus\n", rpprevup & seq_printf(m, "RP PREV UP: %dus\n", rpprevup &
GEN6_CURBSYTAVG_MASK); GEN6_CURBSYTAVG_MASK);
seq_printf(m, "Up threshold: %d%%\n",
dev_priv->rps.up_threshold);
seq_printf(m, "RP CUR DOWN EI: %dus\n", rpdownei & seq_printf(m, "RP CUR DOWN EI: %dus\n", rpdownei &
GEN6_CURIAVG_MASK); GEN6_CURIAVG_MASK);
seq_printf(m, "RP CUR DOWN: %dus\n", rpcurdown & seq_printf(m, "RP CUR DOWN: %dus\n", rpcurdown &
GEN6_CURBSYTAVG_MASK); GEN6_CURBSYTAVG_MASK);
seq_printf(m, "RP PREV DOWN: %dus\n", rpprevdown & seq_printf(m, "RP PREV DOWN: %dus\n", rpprevdown &
GEN6_CURBSYTAVG_MASK); GEN6_CURBSYTAVG_MASK);
seq_printf(m, "Down threshold: %d%%\n",
dev_priv->rps.down_threshold);
max_freq = (rp_state_cap & 0xff0000) >> 16; max_freq = (rp_state_cap & 0xff0000) >> 16;
max_freq *= (IS_SKYLAKE(dev) ? GEN9_FREQ_SCALER : 1); max_freq *= (IS_SKYLAKE(dev) ? GEN9_FREQ_SCALER : 1);
...@@ -1232,12 +1237,21 @@ static int i915_frequency_info(struct seq_file *m, void *unused) ...@@ -1232,12 +1237,21 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
max_freq *= (IS_SKYLAKE(dev) ? GEN9_FREQ_SCALER : 1); max_freq *= (IS_SKYLAKE(dev) ? GEN9_FREQ_SCALER : 1);
seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n", seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
intel_gpu_freq(dev_priv, max_freq)); intel_gpu_freq(dev_priv, max_freq));
seq_printf(m, "Max overclocked frequency: %dMHz\n", seq_printf(m, "Max overclocked frequency: %dMHz\n",
intel_gpu_freq(dev_priv, dev_priv->rps.max_freq)); intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));
seq_printf(m, "Current freq: %d MHz\n",
intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq));
seq_printf(m, "Actual freq: %d MHz\n", cagf);
seq_printf(m, "Idle freq: %d MHz\n", seq_printf(m, "Idle freq: %d MHz\n",
intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq)); intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq));
seq_printf(m, "Min freq: %d MHz\n",
intel_gpu_freq(dev_priv, dev_priv->rps.min_freq));
seq_printf(m, "Max freq: %d MHz\n",
intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));
seq_printf(m,
"efficient (RPe) frequency: %d MHz\n",
intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
} else if (IS_VALLEYVIEW(dev)) { } else if (IS_VALLEYVIEW(dev)) {
u32 freq_sts; u32 freq_sts;
...@@ -1246,6 +1260,12 @@ static int i915_frequency_info(struct seq_file *m, void *unused) ...@@ -1246,6 +1260,12 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts); seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq); seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);
seq_printf(m, "actual GPU freq: %d MHz\n",
intel_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));
seq_printf(m, "current GPU freq: %d MHz\n",
intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq));
seq_printf(m, "max GPU freq: %d MHz\n", seq_printf(m, "max GPU freq: %d MHz\n",
intel_gpu_freq(dev_priv, dev_priv->rps.max_freq)); intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));
...@@ -1258,9 +1278,6 @@ static int i915_frequency_info(struct seq_file *m, void *unused) ...@@ -1258,9 +1278,6 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
seq_printf(m, seq_printf(m,
"efficient (RPe) frequency: %d MHz\n", "efficient (RPe) frequency: %d MHz\n",
intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq)); intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
seq_printf(m, "current GPU freq: %d MHz\n",
intel_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));
mutex_unlock(&dev_priv->rps.hw_lock); mutex_unlock(&dev_priv->rps.hw_lock);
} else { } else {
seq_puts(m, "no P-state info available\n"); seq_puts(m, "no P-state info available\n");
...@@ -3594,8 +3611,7 @@ static void hsw_trans_edp_pipe_A_crc_wa(struct drm_device *dev) ...@@ -3594,8 +3611,7 @@ static void hsw_trans_edp_pipe_A_crc_wa(struct drm_device *dev)
intel_display_power_get(dev_priv, intel_display_power_get(dev_priv,
POWER_DOMAIN_PIPE_PANEL_FITTER(PIPE_A)); POWER_DOMAIN_PIPE_PANEL_FITTER(PIPE_A));
dev_priv->display.crtc_disable(&crtc->base); intel_crtc_reset(crtc);
dev_priv->display.crtc_enable(&crtc->base);
} }
drm_modeset_unlock_all(dev); drm_modeset_unlock_all(dev);
} }
...@@ -3616,8 +3632,7 @@ static void hsw_undo_trans_edp_pipe_A_crc_wa(struct drm_device *dev) ...@@ -3616,8 +3632,7 @@ static void hsw_undo_trans_edp_pipe_A_crc_wa(struct drm_device *dev)
if (crtc->config->pch_pfit.force_thru) { if (crtc->config->pch_pfit.force_thru) {
crtc->config->pch_pfit.force_thru = false; crtc->config->pch_pfit.force_thru = false;
dev_priv->display.crtc_disable(&crtc->base); intel_crtc_reset(crtc);
dev_priv->display.crtc_enable(&crtc->base);
intel_display_power_put(dev_priv, intel_display_power_put(dev_priv,
POWER_DOMAIN_PIPE_PANEL_FITTER(PIPE_A)); POWER_DOMAIN_PIPE_PANEL_FITTER(PIPE_A));
...@@ -3934,6 +3949,212 @@ static const struct file_operations i915_display_crc_ctl_fops = { ...@@ -3934,6 +3949,212 @@ static const struct file_operations i915_display_crc_ctl_fops = {
.write = display_crc_ctl_write .write = display_crc_ctl_write
}; };
static ssize_t i915_displayport_test_active_write(struct file *file,
const char __user *ubuf,
size_t len, loff_t *offp)
{
char *input_buffer;
int status = 0;
struct seq_file *m;
struct drm_device *dev;
struct drm_connector *connector;
struct list_head *connector_list;
struct intel_dp *intel_dp;
int val = 0;
m = file->private_data;
if (!m) {
status = -ENODEV;
return status;
}
dev = m->private;
if (!dev) {
status = -ENODEV;
return status;
}
connector_list = &dev->mode_config.connector_list;
if (len == 0)
return 0;
input_buffer = kmalloc(len + 1, GFP_KERNEL);
if (!input_buffer)
return -ENOMEM;
if (copy_from_user(input_buffer, ubuf, len)) {
status = -EFAULT;
goto out;
}
input_buffer[len] = '\0';
DRM_DEBUG_DRIVER("Copied %d bytes from user\n", (unsigned int)len);
list_for_each_entry(connector, connector_list, head) {
if (connector->connector_type !=
DRM_MODE_CONNECTOR_DisplayPort)
continue;
if (connector->connector_type ==
DRM_MODE_CONNECTOR_DisplayPort &&
connector->status == connector_status_connected &&
connector->encoder != NULL) {
intel_dp = enc_to_intel_dp(connector->encoder);
status = kstrtoint(input_buffer, 10, &val);
if (status < 0)
goto out;
DRM_DEBUG_DRIVER("Got %d for test active\n", val);
/* To prevent erroneous activation of the compliance
* testing code, only accept an actual value of 1 here
*/
if (val == 1)
intel_dp->compliance_test_active = 1;
else
intel_dp->compliance_test_active = 0;
}
}
out:
kfree(input_buffer);
if (status < 0)
return status;
*offp += len;
return len;
}
static int i915_displayport_test_active_show(struct seq_file *m, void *data)
{
struct drm_device *dev = m->private;
struct drm_connector *connector;
struct list_head *connector_list = &dev->mode_config.connector_list;
struct intel_dp *intel_dp;
if (!dev)
return -ENODEV;
list_for_each_entry(connector, connector_list, head) {
if (connector->connector_type !=
DRM_MODE_CONNECTOR_DisplayPort)
continue;
if (connector->status == connector_status_connected &&
connector->encoder != NULL) {
intel_dp = enc_to_intel_dp(connector->encoder);
if (intel_dp->compliance_test_active)
seq_puts(m, "1");
else
seq_puts(m, "0");
} else
seq_puts(m, "0");
}
return 0;
}
static int i915_displayport_test_active_open(struct inode *inode,
struct file *file)
{
struct drm_device *dev = inode->i_private;
return single_open(file, i915_displayport_test_active_show, dev);
}
static const struct file_operations i915_displayport_test_active_fops = {
.owner = THIS_MODULE,
.open = i915_displayport_test_active_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
.write = i915_displayport_test_active_write
};
static int i915_displayport_test_data_show(struct seq_file *m, void *data)
{
struct drm_device *dev = m->private;
struct drm_connector *connector;
struct list_head *connector_list = &dev->mode_config.connector_list;
struct intel_dp *intel_dp;
if (!dev)
return -ENODEV;
list_for_each_entry(connector, connector_list, head) {
if (connector->connector_type !=
DRM_MODE_CONNECTOR_DisplayPort)
continue;
if (connector->status == connector_status_connected &&
connector->encoder != NULL) {
intel_dp = enc_to_intel_dp(connector->encoder);
seq_printf(m, "%lx", intel_dp->compliance_test_data);
} else
seq_puts(m, "0");
}
return 0;
}
static int i915_displayport_test_data_open(struct inode *inode,
struct file *file)
{
struct drm_device *dev = inode->i_private;
return single_open(file, i915_displayport_test_data_show, dev);
}
static const struct file_operations i915_displayport_test_data_fops = {
.owner = THIS_MODULE,
.open = i915_displayport_test_data_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release
};
static int i915_displayport_test_type_show(struct seq_file *m, void *data)
{
struct drm_device *dev = m->private;
struct drm_connector *connector;
struct list_head *connector_list = &dev->mode_config.connector_list;
struct intel_dp *intel_dp;
if (!dev)
return -ENODEV;
list_for_each_entry(connector, connector_list, head) {
if (connector->connector_type !=
DRM_MODE_CONNECTOR_DisplayPort)
continue;
if (connector->status == connector_status_connected &&
connector->encoder != NULL) {
intel_dp = enc_to_intel_dp(connector->encoder);
seq_printf(m, "%02lx", intel_dp->compliance_test_type);
} else
seq_puts(m, "0");
}
return 0;
}
static int i915_displayport_test_type_open(struct inode *inode,
struct file *file)
{
struct drm_device *dev = inode->i_private;
return single_open(file, i915_displayport_test_type_show, dev);
}
static const struct file_operations i915_displayport_test_type_fops = {
.owner = THIS_MODULE,
.open = i915_displayport_test_type_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release
};
static void wm_latency_show(struct seq_file *m, const uint16_t wm[8]) static void wm_latency_show(struct seq_file *m, const uint16_t wm[8])
{ {
struct drm_device *dev = m->private; struct drm_device *dev = m->private;
...@@ -4829,6 +5050,9 @@ static const struct i915_debugfs_files { ...@@ -4829,6 +5050,9 @@ static const struct i915_debugfs_files {
{"i915_spr_wm_latency", &i915_spr_wm_latency_fops}, {"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
{"i915_cur_wm_latency", &i915_cur_wm_latency_fops}, {"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
{"i915_fbc_false_color", &i915_fbc_fc_fops}, {"i915_fbc_false_color", &i915_fbc_fc_fops},
{"i915_dp_test_data", &i915_displayport_test_data_fops},
{"i915_dp_test_type", &i915_displayport_test_type_fops},
{"i915_dp_test_active", &i915_displayport_test_active_fops}
}; };
void intel_display_crc_init(struct drm_device *dev) void intel_display_crc_init(struct drm_device *dev)
......
...@@ -816,6 +816,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) ...@@ -816,6 +816,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
spin_lock_init(&dev_priv->mmio_flip_lock); spin_lock_init(&dev_priv->mmio_flip_lock);
mutex_init(&dev_priv->dpio_lock); mutex_init(&dev_priv->dpio_lock);
mutex_init(&dev_priv->modeset_restore_lock); mutex_init(&dev_priv->modeset_restore_lock);
mutex_init(&dev_priv->csr_lock);
intel_pm_setup(dev); intel_pm_setup(dev);
...@@ -861,9 +862,12 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) ...@@ -861,9 +862,12 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
intel_uncore_init(dev); intel_uncore_init(dev);
/* Load CSR Firmware for SKL */
intel_csr_ucode_init(dev);
ret = i915_gem_gtt_init(dev); ret = i915_gem_gtt_init(dev);
if (ret) if (ret)
goto out_regs; goto out_freecsr;
/* WARNING: Apparently we must kick fbdev drivers before vgacon, /* WARNING: Apparently we must kick fbdev drivers before vgacon,
* otherwise the vga fbdev driver falls over. */ * otherwise the vga fbdev driver falls over. */
...@@ -1033,7 +1037,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) ...@@ -1033,7 +1037,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
io_mapping_free(dev_priv->gtt.mappable); io_mapping_free(dev_priv->gtt.mappable);
out_gtt: out_gtt:
i915_global_gtt_cleanup(dev); i915_global_gtt_cleanup(dev);
out_regs: out_freecsr:
intel_csr_ucode_fini(dev);
intel_uncore_fini(dev); intel_uncore_fini(dev);
pci_iounmap(dev->pdev, dev_priv->regs); pci_iounmap(dev->pdev, dev_priv->regs);
put_bridge: put_bridge:
...@@ -1113,6 +1118,8 @@ int i915_driver_unload(struct drm_device *dev) ...@@ -1113,6 +1118,8 @@ int i915_driver_unload(struct drm_device *dev)
mutex_unlock(&dev->struct_mutex); mutex_unlock(&dev->struct_mutex);
i915_gem_cleanup_stolen(dev); i915_gem_cleanup_stolen(dev);
intel_csr_ucode_fini(dev);
intel_teardown_gmbus(dev); intel_teardown_gmbus(dev);
intel_teardown_mchbar(dev); intel_teardown_mchbar(dev);
......
...@@ -556,6 +556,26 @@ void intel_hpd_cancel_work(struct drm_i915_private *dev_priv) ...@@ -556,6 +556,26 @@ void intel_hpd_cancel_work(struct drm_i915_private *dev_priv)
cancel_delayed_work_sync(&dev_priv->hotplug_reenable_work); cancel_delayed_work_sync(&dev_priv->hotplug_reenable_work);
} }
void i915_firmware_load_error_print(const char *fw_path, int err)
{
DRM_ERROR("failed to load firmware %s (%d)\n", fw_path, err);
/*
* If the reason is not known, assume -ENOENT since that's the most
* usual failure mode.
*/
if (!err)
err = -ENOENT;
if (!(IS_BUILTIN(CONFIG_DRM_I915) && err == -ENOENT))
return;
DRM_ERROR(
"The driver is built-in, so to load the firmware you need to\n"
"include it either in the kernel (see CONFIG_EXTRA_FIRMWARE) or\n"
"in your initrd/initramfs image.\n");
}
static void intel_suspend_encoders(struct drm_i915_private *dev_priv) static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
{ {
struct drm_device *dev = dev_priv->dev; struct drm_device *dev = dev_priv->dev;
...@@ -574,6 +594,8 @@ static void intel_suspend_encoders(struct drm_i915_private *dev_priv) ...@@ -574,6 +594,8 @@ static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
static int intel_suspend_complete(struct drm_i915_private *dev_priv); static int intel_suspend_complete(struct drm_i915_private *dev_priv);
static int vlv_resume_prepare(struct drm_i915_private *dev_priv, static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
bool rpm_resume); bool rpm_resume);
static int skl_resume_prepare(struct drm_i915_private *dev_priv);
static int i915_drm_suspend(struct drm_device *dev) static int i915_drm_suspend(struct drm_device *dev)
{ {
...@@ -788,6 +810,8 @@ static int i915_drm_resume_early(struct drm_device *dev) ...@@ -788,6 +810,8 @@ static int i915_drm_resume_early(struct drm_device *dev)
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
hsw_disable_pc8(dev_priv); hsw_disable_pc8(dev_priv);
else if (IS_SKYLAKE(dev_priv))
ret = skl_resume_prepare(dev_priv);
intel_uncore_sanitize(dev); intel_uncore_sanitize(dev);
intel_power_domains_init_hw(dev_priv); intel_power_domains_init_hw(dev_priv);
...@@ -1002,6 +1026,19 @@ static int i915_pm_resume(struct device *dev) ...@@ -1002,6 +1026,19 @@ static int i915_pm_resume(struct device *dev)
return i915_drm_resume(drm_dev); return i915_drm_resume(drm_dev);
} }
static int skl_suspend_complete(struct drm_i915_private *dev_priv)
{
/* Enabling DC6 is not a hard requirement to enter runtime D3 */
/*
* This is to ensure that CSR isn't identified as loaded before
* the CSR-loading program is called during runtime-resume.
*/
intel_csr_load_status_set(dev_priv, FW_UNINITIALIZED);
return 0;
}
static int hsw_suspend_complete(struct drm_i915_private *dev_priv) static int hsw_suspend_complete(struct drm_i915_private *dev_priv)
{ {
hsw_enable_pc8(dev_priv); hsw_enable_pc8(dev_priv);
...@@ -1041,6 +1078,15 @@ static int bxt_resume_prepare(struct drm_i915_private *dev_priv) ...@@ -1041,6 +1078,15 @@ static int bxt_resume_prepare(struct drm_i915_private *dev_priv)
return 0; return 0;
} }
static int skl_resume_prepare(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
intel_csr_load_program(dev);
return 0;
}
/* /*
* Save all Gunit registers that may be lost after a D3 and a subsequent * Save all Gunit registers that may be lost after a D3 and a subsequent
* S0i[R123] transition. The list of registers needing a save/restore is * S0i[R123] transition. The list of registers needing a save/restore is
...@@ -1502,6 +1548,8 @@ static int intel_runtime_resume(struct device *device) ...@@ -1502,6 +1548,8 @@ static int intel_runtime_resume(struct device *device)
if (IS_BROXTON(dev)) if (IS_BROXTON(dev))
ret = bxt_resume_prepare(dev_priv); ret = bxt_resume_prepare(dev_priv);
else if (IS_SKYLAKE(dev))
ret = skl_resume_prepare(dev_priv);
else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
hsw_disable_pc8(dev_priv); hsw_disable_pc8(dev_priv);
else if (IS_VALLEYVIEW(dev_priv)) else if (IS_VALLEYVIEW(dev_priv))
...@@ -1536,6 +1584,8 @@ static int intel_suspend_complete(struct drm_i915_private *dev_priv) ...@@ -1536,6 +1584,8 @@ static int intel_suspend_complete(struct drm_i915_private *dev_priv)
if (IS_BROXTON(dev)) if (IS_BROXTON(dev))
ret = bxt_suspend_complete(dev_priv); ret = bxt_suspend_complete(dev_priv);
else if (IS_SKYLAKE(dev))
ret = skl_suspend_complete(dev_priv);
else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
ret = hsw_suspend_complete(dev_priv); ret = hsw_suspend_complete(dev_priv);
else if (IS_VALLEYVIEW(dev)) else if (IS_VALLEYVIEW(dev))
......
...@@ -56,7 +56,7 @@ ...@@ -56,7 +56,7 @@
#define DRIVER_NAME "i915" #define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics" #define DRIVER_DESC "Intel Graphics"
#define DRIVER_DATE "20150423" #define DRIVER_DATE "20150508"
#undef WARN_ON #undef WARN_ON
/* Many gcc seem to no see through this and fall over :( */ /* Many gcc seem to no see through this and fall over :( */
...@@ -238,6 +238,11 @@ enum hpd_pin { ...@@ -238,6 +238,11 @@ enum hpd_pin {
#define for_each_crtc(dev, crtc) \ #define for_each_crtc(dev, crtc) \
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
#define for_each_intel_plane(dev, intel_plane) \
list_for_each_entry(intel_plane, \
&dev->mode_config.plane_list, \
base.head)
#define for_each_intel_crtc(dev, intel_crtc) \ #define for_each_intel_crtc(dev, intel_crtc) \
list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head) list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head)
...@@ -295,7 +300,7 @@ struct intel_dpll_hw_state { ...@@ -295,7 +300,7 @@ struct intel_dpll_hw_state {
/* skl */ /* skl */
/* /*
* DPLL_CTRL1 has 6 bits for each each this DPLL. We store those in * DPLL_CTRL1 has 6 bits for each each this DPLL. We store those in
* lower part of crtl1 and they get shifted into position when writing * lower part of ctrl1 and they get shifted into position when writing
* the register. This allows us to easily compare the state to share * the register. This allows us to easily compare the state to share
* the DPLL. * the DPLL.
*/ */
...@@ -669,6 +674,22 @@ struct intel_uncore { ...@@ -669,6 +674,22 @@ struct intel_uncore {
#define for_each_fw_domain(domain__, dev_priv__, i__) \ #define for_each_fw_domain(domain__, dev_priv__, i__) \
for_each_fw_domain_mask(domain__, FORCEWAKE_ALL, dev_priv__, i__) for_each_fw_domain_mask(domain__, FORCEWAKE_ALL, dev_priv__, i__)
enum csr_state {
FW_UNINITIALIZED = 0,
FW_LOADED,
FW_FAILED
};
struct intel_csr {
const char *fw_path;
__be32 *dmc_payload;
uint32_t dmc_fw_size;
uint32_t mmio_count;
uint32_t mmioaddr[8];
uint32_t mmiodata[8];
enum csr_state state;
};
#define DEV_INFO_FOR_EACH_FLAG(func, sep) \ #define DEV_INFO_FOR_EACH_FLAG(func, sep) \
func(is_mobile) sep \ func(is_mobile) sep \
func(is_i85x) sep \ func(is_i85x) sep \
...@@ -1348,7 +1369,6 @@ struct intel_vbt_data { ...@@ -1348,7 +1369,6 @@ struct intel_vbt_data {
bool edp_initialized; bool edp_initialized;
bool edp_support; bool edp_support;
int edp_bpp; int edp_bpp;
bool edp_low_vswing;
struct edp_power_seq edp_pps; struct edp_power_seq edp_pps;
struct { struct {
...@@ -1574,6 +1594,11 @@ struct drm_i915_private { ...@@ -1574,6 +1594,11 @@ struct drm_i915_private {
struct i915_virtual_gpu vgpu; struct i915_virtual_gpu vgpu;
struct intel_csr csr;
/* Display CSR-related protection */
struct mutex csr_lock;
struct intel_gmbus gmbus[GMBUS_NUM_PINS]; struct intel_gmbus gmbus[GMBUS_NUM_PINS];
/** gmbus_mutex protects against concurrent usage of the single hw gmbus /** gmbus_mutex protects against concurrent usage of the single hw gmbus
...@@ -1757,6 +1782,8 @@ struct drm_i915_private { ...@@ -1757,6 +1782,8 @@ struct drm_i915_private {
u32 fdi_rx_config; u32 fdi_rx_config;
u32 chv_phy_control;
u32 suspend_count; u32 suspend_count;
struct i915_suspend_saved_registers regfile; struct i915_suspend_saved_registers regfile;
struct vlv_s0ix_state vlv_s0ix_state; struct vlv_s0ix_state vlv_s0ix_state;
...@@ -1825,6 +1852,8 @@ struct drm_i915_private { ...@@ -1825,6 +1852,8 @@ struct drm_i915_private {
void (*stop_ring)(struct intel_engine_cs *ring); void (*stop_ring)(struct intel_engine_cs *ring);
} gt; } gt;
bool edp_low_vswing;
/* /*
* NOTE: This is the dri1/ums dungeon, don't add stuff here. Your patch * NOTE: This is the dri1/ums dungeon, don't add stuff here. Your patch
* will be rejected. Instead look for a better place. * will be rejected. Instead look for a better place.
...@@ -2422,10 +2451,13 @@ struct drm_i915_cmd_table { ...@@ -2422,10 +2451,13 @@ struct drm_i915_cmd_table {
IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev) || \ IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev) || \
IS_SKYLAKE(dev)) IS_SKYLAKE(dev))
#define HAS_RUNTIME_PM(dev) (IS_GEN6(dev) || IS_HASWELL(dev) || \ #define HAS_RUNTIME_PM(dev) (IS_GEN6(dev) || IS_HASWELL(dev) || \
IS_BROADWELL(dev) || IS_VALLEYVIEW(dev)) IS_BROADWELL(dev) || IS_VALLEYVIEW(dev) || \
IS_SKYLAKE(dev))
#define HAS_RC6(dev) (INTEL_INFO(dev)->gen >= 6) #define HAS_RC6(dev) (INTEL_INFO(dev)->gen >= 6)
#define HAS_RC6p(dev) (INTEL_INFO(dev)->gen == 6 || IS_IVYBRIDGE(dev)) #define HAS_RC6p(dev) (INTEL_INFO(dev)->gen == 6 || IS_IVYBRIDGE(dev))
#define HAS_CSR(dev) (IS_SKYLAKE(dev))
#define INTEL_PCH_DEVICE_ID_MASK 0xff00 #define INTEL_PCH_DEVICE_ID_MASK 0xff00
#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00 #define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
#define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00 #define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00
...@@ -2491,6 +2523,7 @@ struct i915_params { ...@@ -2491,6 +2523,7 @@ struct i915_params {
int mmio_debug; int mmio_debug;
bool verbose_state_checks; bool verbose_state_checks;
bool nuclear_pageflip; bool nuclear_pageflip;
int edp_vswing;
}; };
extern struct i915_params i915 __read_mostly; extern struct i915_params i915 __read_mostly;
...@@ -2516,6 +2549,7 @@ extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv); ...@@ -2516,6 +2549,7 @@ extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
extern void i915_update_gfx_val(struct drm_i915_private *dev_priv); extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on); int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on);
void intel_hpd_cancel_work(struct drm_i915_private *dev_priv); void intel_hpd_cancel_work(struct drm_i915_private *dev_priv);
void i915_firmware_load_error_print(const char *fw_path, int err);
/* i915_irq.c */ /* i915_irq.c */
void i915_queue_hangcheck(struct drm_device *dev); void i915_queue_hangcheck(struct drm_device *dev);
......
...@@ -1635,6 +1635,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) ...@@ -1635,6 +1635,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data); struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data);
struct drm_device *dev = obj->base.dev; struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_ggtt_view view = i915_ggtt_view_normal;
pgoff_t page_offset; pgoff_t page_offset;
unsigned long pfn; unsigned long pfn;
int ret = 0; int ret = 0;
...@@ -1667,8 +1668,23 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) ...@@ -1667,8 +1668,23 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
goto unlock; goto unlock;
} }
/* Now bind it into the GTT if needed */ /* Use a partial view if the object is bigger than the aperture. */
ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE); if (obj->base.size >= dev_priv->gtt.mappable_end &&
obj->tiling_mode == I915_TILING_NONE) {
static const unsigned int chunk_size = 256; // 1 MiB
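/* Map only the 1 MiB (256 page) chunk containing the faulting page
* rather than the whole object, clamped to the end of the userspace
* mapping.
*/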
memset(&view, 0, sizeof(view));
view.type = I915_GGTT_VIEW_PARTIAL;
view.params.partial.offset = rounddown(page_offset, chunk_size);
view.params.partial.size =
min_t(unsigned int,
chunk_size,
(vma->vm_end - vma->vm_start)/PAGE_SIZE -
view.params.partial.offset);
}
/* Now pin it into the GTT if needed */
ret = i915_gem_object_ggtt_pin(obj, &view, 0, PIN_MAPPABLE);
if (ret) if (ret)
goto unlock; goto unlock;
...@@ -1681,9 +1697,28 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) ...@@ -1681,9 +1697,28 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
goto unpin; goto unpin;
/* Finally, remap it using the new GTT offset */ /* Finally, remap it using the new GTT offset */
pfn = dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj); pfn = dev_priv->gtt.mappable_base +
i915_gem_obj_ggtt_offset_view(obj, &view);
pfn >>= PAGE_SHIFT; pfn >>= PAGE_SHIFT;
if (unlikely(view.type == I915_GGTT_VIEW_PARTIAL)) {
/* Overriding existing pages in partial view does not cause
* us any trouble as TLBs are still valid because the fault
* is due to userspace losing part of the mapping or never
* having accessed it before (in this partial view's range).
*/
unsigned long base = vma->vm_start +
(view.params.partial.offset << PAGE_SHIFT);
unsigned int i;
for (i = 0; i < view.params.partial.size; i++) {
ret = vm_insert_pfn(vma, base + i * PAGE_SIZE, pfn + i);
if (ret)
break;
}
obj->fault_mappable = true;
} else {
if (!obj->fault_mappable) { if (!obj->fault_mappable) {
unsigned long size = min_t(unsigned long, unsigned long size = min_t(unsigned long,
vma->vm_end - vma->vm_start, vma->vm_end - vma->vm_start,
...@@ -1703,8 +1738,9 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) ...@@ -1703,8 +1738,9 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
ret = vm_insert_pfn(vma, ret = vm_insert_pfn(vma,
(unsigned long)vmf->virtual_address, (unsigned long)vmf->virtual_address,
pfn + page_offset); pfn + page_offset);
}
unpin: unpin:
i915_gem_object_ggtt_unpin(obj); i915_gem_object_ggtt_unpin_view(obj, &view);
unlock: unlock:
mutex_unlock(&dev->struct_mutex); mutex_unlock(&dev->struct_mutex);
out: out:
...@@ -1897,11 +1933,6 @@ i915_gem_mmap_gtt(struct drm_file *file, ...@@ -1897,11 +1933,6 @@ i915_gem_mmap_gtt(struct drm_file *file,
goto unlock; goto unlock;
} }
if (obj->base.size > dev_priv->gtt.mappable_end) {
ret = -E2BIG;
goto out;
}
if (obj->madv != I915_MADV_WILLNEED) { if (obj->madv != I915_MADV_WILLNEED) {
DRM_DEBUG("Attempting to mmap a purgeable buffer\n"); DRM_DEBUG("Attempting to mmap a purgeable buffer\n");
ret = -EFAULT; ret = -EFAULT;
...@@ -3069,6 +3100,7 @@ int i915_vma_unbind(struct i915_vma *vma) ...@@ -3069,6 +3100,7 @@ int i915_vma_unbind(struct i915_vma *vma)
trace_i915_vma_unbind(vma); trace_i915_vma_unbind(vma);
vma->vm->unbind_vma(vma); vma->vm->unbind_vma(vma);
vma->bound = 0;
list_del_init(&vma->mm_list); list_del_init(&vma->mm_list);
if (i915_is_ggtt(vma->vm)) { if (i915_is_ggtt(vma->vm)) {
...@@ -3497,7 +3529,8 @@ static bool i915_gem_valid_gtt_space(struct i915_vma *vma, ...@@ -3497,7 +3529,8 @@ static bool i915_gem_valid_gtt_space(struct i915_vma *vma,
} }
/** /**
* Finds free space in the GTT aperture and binds the object there. * Finds free space in the GTT aperture and binds the object or a view of it
* there.
*/ */
static struct i915_vma * static struct i915_vma *
i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj, i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
...@@ -3516,36 +3549,60 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj, ...@@ -3516,36 +3549,60 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
struct i915_vma *vma; struct i915_vma *vma;
int ret; int ret;
if(WARN_ON(i915_is_ggtt(vm) != !!ggtt_view)) if (i915_is_ggtt(vm)) {
u32 view_size;
if (WARN_ON(!ggtt_view))
return ERR_PTR(-EINVAL); return ERR_PTR(-EINVAL);
view_size = i915_ggtt_view_size(obj, ggtt_view);
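/* Size and alignment requirements are derived from the view, which
* for a partial view can be smaller than the underlying object.
*/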
fence_size = i915_gem_get_gtt_size(dev,
view_size,
obj->tiling_mode);
fence_alignment = i915_gem_get_gtt_alignment(dev,
view_size,
obj->tiling_mode,
true);
unfenced_alignment = i915_gem_get_gtt_alignment(dev,
view_size,
obj->tiling_mode,
false);
size = flags & PIN_MAPPABLE ? fence_size : view_size;
} else {
fence_size = i915_gem_get_gtt_size(dev, fence_size = i915_gem_get_gtt_size(dev,
obj->base.size, obj->base.size,
obj->tiling_mode); obj->tiling_mode);
fence_alignment = i915_gem_get_gtt_alignment(dev, fence_alignment = i915_gem_get_gtt_alignment(dev,
obj->base.size, obj->base.size,
obj->tiling_mode, true); obj->tiling_mode,
true);
unfenced_alignment = unfenced_alignment =
i915_gem_get_gtt_alignment(dev, i915_gem_get_gtt_alignment(dev,
obj->base.size, obj->base.size,
obj->tiling_mode, false); obj->tiling_mode,
false);
size = flags & PIN_MAPPABLE ? fence_size : obj->base.size;
}
if (alignment == 0) if (alignment == 0)
alignment = flags & PIN_MAPPABLE ? fence_alignment : alignment = flags & PIN_MAPPABLE ? fence_alignment :
unfenced_alignment; unfenced_alignment;
if (flags & PIN_MAPPABLE && alignment & (fence_alignment - 1)) { if (flags & PIN_MAPPABLE && alignment & (fence_alignment - 1)) {
DRM_DEBUG("Invalid object alignment requested %u\n", alignment); DRM_DEBUG("Invalid object (view type=%u) alignment requested %u\n",
ggtt_view ? ggtt_view->type : 0,
alignment);
return ERR_PTR(-EINVAL); return ERR_PTR(-EINVAL);
} }
size = flags & PIN_MAPPABLE ? fence_size : obj->base.size; /* If binding the object/GGTT view requires more space than the entire
* aperture has, reject it early before evicting everything in a vain
/* If the object is bigger than the entire aperture, reject it early * attempt to find space.
* before evicting everything in a vain attempt to find space.
*/ */
if (obj->base.size > end) { if (size > end) {
DRM_DEBUG("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%lu\n", DRM_DEBUG("Attempting to bind an object (view type=%u) larger than the aperture: size=%u > %s aperture=%lu\n",
obj->base.size, ggtt_view ? ggtt_view->type : 0,
size,
flags & PIN_MAPPABLE ? "mappable" : "total", flags & PIN_MAPPABLE ? "mappable" : "total",
end); end);
return ERR_PTR(-E2BIG); return ERR_PTR(-E2BIG);
...@@ -3841,17 +3898,10 @@ int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data, ...@@ -3841,17 +3898,10 @@ int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
{ {
struct drm_i915_gem_caching *args = data; struct drm_i915_gem_caching *args = data;
struct drm_i915_gem_object *obj; struct drm_i915_gem_object *obj;
int ret;
ret = i915_mutex_lock_interruptible(dev);
if (ret)
return ret;
obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
if (&obj->base == NULL) { if (&obj->base == NULL)
ret = -ENOENT; return -ENOENT;
goto unlock;
}
switch (obj->cache_level) { switch (obj->cache_level) {
case I915_CACHE_LLC: case I915_CACHE_LLC:
...@@ -3868,10 +3918,8 @@ int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data, ...@@ -3868,10 +3918,8 @@ int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
break; break;
} }
drm_gem_object_unreference(&obj->base); drm_gem_object_unreference_unlocked(&obj->base);
unlock: return 0;
mutex_unlock(&dev->struct_mutex);
return ret;
} }
int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data, int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
...@@ -4207,7 +4255,8 @@ i915_gem_object_do_pin(struct drm_i915_gem_object *obj, ...@@ -4207,7 +4255,8 @@ i915_gem_object_do_pin(struct drm_i915_gem_object *obj,
return ret; return ret;
} }
if ((bound ^ vma->bound) & GLOBAL_BIND) { if (ggtt_view && ggtt_view->type == I915_GGTT_VIEW_NORMAL &&
(bound ^ vma->bound) & GLOBAL_BIND) {
bool mappable, fenceable; bool mappable, fenceable;
u32 fence_size, fence_alignment; u32 fence_size, fence_alignment;
...@@ -4226,9 +4275,9 @@ i915_gem_object_do_pin(struct drm_i915_gem_object *obj, ...@@ -4226,9 +4275,9 @@ i915_gem_object_do_pin(struct drm_i915_gem_object *obj,
dev_priv->gtt.mappable_end); dev_priv->gtt.mappable_end);
obj->map_and_fenceable = mappable && fenceable; obj->map_and_fenceable = mappable && fenceable;
}
WARN_ON(flags & PIN_MAPPABLE && !obj->map_and_fenceable); WARN_ON(flags & PIN_MAPPABLE && !obj->map_and_fenceable);
}
vma->pin_count++; vma->pin_count++;
return 0; return 0;
...@@ -5226,13 +5275,10 @@ unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o, ...@@ -5226,13 +5275,10 @@ unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj) bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj)
{ {
struct i915_vma *vma; struct i915_vma *vma;
list_for_each_entry(vma, &obj->vma_list, vma_link) { list_for_each_entry(vma, &obj->vma_list, vma_link)
if (i915_is_ggtt(vma->vm) &&
vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
continue;
if (vma->pin_count > 0) if (vma->pin_count > 0)
return true; return true;
}
return false; return false;
} }
...@@ -1540,29 +1540,39 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, ...@@ -1540,29 +1540,39 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
} }
if (i915_needs_cmd_parser(ring) && args->batch_len) { if (i915_needs_cmd_parser(ring) && args->batch_len) {
batch_obj = i915_gem_execbuffer_parse(ring, struct drm_i915_gem_object *parsed_batch_obj;
parsed_batch_obj = i915_gem_execbuffer_parse(ring,
&shadow_exec_entry, &shadow_exec_entry,
eb, eb,
batch_obj, batch_obj,
args->batch_start_offset, args->batch_start_offset,
args->batch_len, args->batch_len,
file->is_master); file->is_master);
if (IS_ERR(batch_obj)) { if (IS_ERR(parsed_batch_obj)) {
ret = PTR_ERR(batch_obj); ret = PTR_ERR(parsed_batch_obj);
goto err; goto err;
} }
/* /*
* parsed_batch_obj == batch_obj means batch not fully parsed:
* Accept, but don't promote to secure.
*/
if (parsed_batch_obj != batch_obj) {
/*
* Batch parsed and accepted:
*
* Set the DISPATCH_SECURE bit to remove the NON_SECURE * Set the DISPATCH_SECURE bit to remove the NON_SECURE
* bit from MI_BATCH_BUFFER_START commands issued in the * bit from MI_BATCH_BUFFER_START commands issued in
* dispatch_execbuffer implementations. We specifically * the dispatch_execbuffer implementations. We
* don't want that set when the command parser is * specifically don't want that set on batches the
* enabled. * command parser has accepted.
*/ */
if (USES_PPGTT(dev))
dispatch_flags |= I915_DISPATCH_SECURE; dispatch_flags |= I915_DISPATCH_SECURE;
exec_start = 0; exec_start = 0;
batch_obj = parsed_batch_obj;
}
} }
batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND; batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
......
...@@ -756,8 +756,8 @@ static int gen8_ppgtt_alloc_page_directories(struct i915_hw_ppgtt *ppgtt, ...@@ -756,8 +756,8 @@ static int gen8_ppgtt_alloc_page_directories(struct i915_hw_ppgtt *ppgtt,
WARN_ON(!bitmap_empty(new_pds, GEN8_LEGACY_PDPES)); WARN_ON(!bitmap_empty(new_pds, GEN8_LEGACY_PDPES));
/* FIXME: PPGTT container_of won't work for 64b */ /* FIXME: upper bound must not overflow 32 bits */
WARN_ON((start + length) > 0x800000000ULL); WARN_ON((start + length) >= (1ULL << 32));
gen8_for_each_pdpe(pd, pdp, start, length, temp, pdpe) { gen8_for_each_pdpe(pd, pdp, start, length, temp, pdpe) {
if (pd) if (pd)
...@@ -844,15 +844,6 @@ static int gen8_alloc_va_range(struct i915_address_space *vm, ...@@ -844,15 +844,6 @@ static int gen8_alloc_va_range(struct i915_address_space *vm,
uint32_t pdpe; uint32_t pdpe;
int ret; int ret;
#ifndef CONFIG_64BIT
/* Disallow 64b address on 32b platforms. Nothing is wrong with doing
* this in hardware, but a lot of the drm code is not prepared to handle
* 64b offset on 32b platforms.
* This will be addressed when 48b PPGTT is added */
if (start + length > 0x100000000ULL)
return -E2BIG;
#endif
/* Wrap is never okay since we can only represent 48b, and we don't /* Wrap is never okay since we can only represent 48b, and we don't
* actually use the other side of the canonical address space. * actually use the other side of the canonical address space.
*/ */
...@@ -1945,19 +1936,23 @@ static void ggtt_unbind_vma(struct i915_vma *vma) ...@@ -1945,19 +1936,23 @@ static void ggtt_unbind_vma(struct i915_vma *vma)
struct drm_device *dev = vma->vm->dev; struct drm_device *dev = vma->vm->dev;
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj = vma->obj; struct drm_i915_gem_object *obj = vma->obj;
const uint64_t size = min_t(uint64_t,
obj->base.size,
vma->node.size);
if (vma->bound & GLOBAL_BIND) { if (vma->bound & GLOBAL_BIND) {
vma->vm->clear_range(vma->vm, vma->vm->clear_range(vma->vm,
vma->node.start, vma->node.start,
obj->base.size, size,
true); true);
} }
if (dev_priv->mm.aliasing_ppgtt && vma->bound & LOCAL_BIND) { if (dev_priv->mm.aliasing_ppgtt && vma->bound & LOCAL_BIND) {
struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt; struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt;
appgtt->base.clear_range(&appgtt->base, appgtt->base.clear_range(&appgtt->base,
vma->node.start, vma->node.start,
obj->base.size, size,
true); true);
} }
} }
...@@ -2758,6 +2753,47 @@ intel_rotate_fb_obj_pages(struct i915_ggtt_view *ggtt_view, ...@@ -2758,6 +2753,47 @@ intel_rotate_fb_obj_pages(struct i915_ggtt_view *ggtt_view,
return ERR_PTR(ret); return ERR_PTR(ret);
} }
static struct sg_table *
intel_partial_pages(const struct i915_ggtt_view *view,
struct drm_i915_gem_object *obj)
{
struct sg_table *st;
struct scatterlist *sg;
struct sg_page_iter obj_sg_iter;
int ret = -ENOMEM;
st = kmalloc(sizeof(*st), GFP_KERNEL);
if (!st)
goto err_st_alloc;
ret = sg_alloc_table(st, view->params.partial.size, GFP_KERNEL);
if (ret)
goto err_sg_alloc;
sg = st->sgl;
st->nents = 0;
for_each_sg_page(obj->pages->sgl, &obj_sg_iter, obj->pages->nents,
view->params.partial.offset)
{
if (st->nents >= view->params.partial.size)
break;
sg_set_page(sg, NULL, PAGE_SIZE, 0);
sg_dma_address(sg) = sg_page_iter_dma_address(&obj_sg_iter);
sg_dma_len(sg) = PAGE_SIZE;
sg = sg_next(sg);
st->nents++;
}
return st;
err_sg_alloc:
kfree(st);
err_st_alloc:
return ERR_PTR(ret);
}
static int static int
i915_get_ggtt_vma_pages(struct i915_vma *vma) i915_get_ggtt_vma_pages(struct i915_vma *vma)
{ {
...@@ -2771,6 +2807,9 @@ i915_get_ggtt_vma_pages(struct i915_vma *vma) ...@@ -2771,6 +2807,9 @@ i915_get_ggtt_vma_pages(struct i915_vma *vma)
else if (vma->ggtt_view.type == I915_GGTT_VIEW_ROTATED) else if (vma->ggtt_view.type == I915_GGTT_VIEW_ROTATED)
vma->ggtt_view.pages = vma->ggtt_view.pages =
intel_rotate_fb_obj_pages(&vma->ggtt_view, vma->obj); intel_rotate_fb_obj_pages(&vma->ggtt_view, vma->obj);
else if (vma->ggtt_view.type == I915_GGTT_VIEW_PARTIAL)
vma->ggtt_view.pages =
intel_partial_pages(&vma->ggtt_view, vma->obj);
else else
WARN_ONCE(1, "GGTT view %u not implemented!\n", WARN_ONCE(1, "GGTT view %u not implemented!\n",
vma->ggtt_view.type); vma->ggtt_view.type);
...@@ -2843,3 +2882,25 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level, ...@@ -2843,3 +2882,25 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
return 0; return 0;
} }
/**
* i915_ggtt_view_size - Get the size of a GGTT view.
* @obj: Object the view is of.
* @view: The view in question.
*
* @return The size of the GGTT view in bytes.
*/
size_t
i915_ggtt_view_size(struct drm_i915_gem_object *obj,
const struct i915_ggtt_view *view)
{
if (view->type == I915_GGTT_VIEW_NORMAL ||
view->type == I915_GGTT_VIEW_ROTATED) {
return obj->base.size;
} else if (view->type == I915_GGTT_VIEW_PARTIAL) {
return view->params.partial.size << PAGE_SHIFT;
} else {
WARN_ONCE(1, "GGTT view %u not implemented!\n", view->type);
return obj->base.size;
}
}
...@@ -117,7 +117,8 @@ typedef uint64_t gen8_pde_t; ...@@ -117,7 +117,8 @@ typedef uint64_t gen8_pde_t;
enum i915_ggtt_view_type { enum i915_ggtt_view_type {
I915_GGTT_VIEW_NORMAL = 0, I915_GGTT_VIEW_NORMAL = 0,
I915_GGTT_VIEW_ROTATED I915_GGTT_VIEW_ROTATED,
I915_GGTT_VIEW_PARTIAL,
}; };
struct intel_rotation_info { struct intel_rotation_info {
...@@ -130,6 +131,13 @@ struct intel_rotation_info { ...@@ -130,6 +131,13 @@ struct intel_rotation_info {
struct i915_ggtt_view { struct i915_ggtt_view {
enum i915_ggtt_view_type type; enum i915_ggtt_view_type type;
union {
struct {
unsigned long offset;
unsigned int size;
} partial;
} params;
struct sg_table *pages; struct sg_table *pages;
union { union {
...@@ -495,7 +503,15 @@ i915_ggtt_view_equal(const struct i915_ggtt_view *a, ...@@ -495,7 +503,15 @@ i915_ggtt_view_equal(const struct i915_ggtt_view *a,
if (WARN_ON(!a || !b)) if (WARN_ON(!a || !b))
return false; return false;
return a->type == b->type; if (a->type != b->type)
return false;
if (a->type == I915_GGTT_VIEW_PARTIAL)
return !memcmp(&a->params, &b->params, sizeof(a->params));
return true;
} }
size_t
i915_ggtt_view_size(struct drm_i915_gem_object *obj,
const struct i915_ggtt_view *view);
#endif #endif
...@@ -554,6 +554,7 @@ static void i915_error_state_free(struct kref *error_ref) ...@@ -554,6 +554,7 @@ static void i915_error_state_free(struct kref *error_ref)
for (i = 0; i < ARRAY_SIZE(error->ring); i++) { for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
i915_error_object_free(error->ring[i].batchbuffer); i915_error_object_free(error->ring[i].batchbuffer);
i915_error_object_free(error->ring[i].wa_batchbuffer);
i915_error_object_free(error->ring[i].ringbuffer); i915_error_object_free(error->ring[i].ringbuffer);
i915_error_object_free(error->ring[i].hws_page); i915_error_object_free(error->ring[i].hws_page);
i915_error_object_free(error->ring[i].ctx); i915_error_object_free(error->ring[i].ctx);
......
...@@ -53,6 +53,7 @@ struct i915_params i915 __read_mostly = { ...@@ -53,6 +53,7 @@ struct i915_params i915 __read_mostly = {
.mmio_debug = 0, .mmio_debug = 0,
.verbose_state_checks = 1, .verbose_state_checks = 1,
.nuclear_pageflip = 0, .nuclear_pageflip = 0,
.edp_vswing = 0,
}; };
module_param_named(modeset, i915.modeset, int, 0400); module_param_named(modeset, i915.modeset, int, 0400);
...@@ -184,3 +185,10 @@ MODULE_PARM_DESC(verbose_state_checks, ...@@ -184,3 +185,10 @@ MODULE_PARM_DESC(verbose_state_checks,
module_param_named_unsafe(nuclear_pageflip, i915.nuclear_pageflip, bool, 0600); module_param_named_unsafe(nuclear_pageflip, i915.nuclear_pageflip, bool, 0600);
MODULE_PARM_DESC(nuclear_pageflip, MODULE_PARM_DESC(nuclear_pageflip,
"Force atomic modeset functionality; only planes work for now (default: false)."); "Force atomic modeset functionality; only planes work for now (default: false).");
/* WA to get away with the default setting in VBT for early platforms. Will be removed */
module_param_named_unsafe(edp_vswing, i915.edp_vswing, int, 0400);
MODULE_PARM_DESC(edp_vswing,
"Ignore/Override vswing pre-emph table selection from VBT "
"(0=use value from vbt [default], 1=low power swing(200mV),"
"2=default swing(400mV))");
...@@ -670,6 +670,12 @@ enum skl_disp_power_wells { ...@@ -670,6 +670,12 @@ enum skl_disp_power_wells {
#define FB_FMAX_VMIN_FREQ_LO_SHIFT 27 #define FB_FMAX_VMIN_FREQ_LO_SHIFT 27
#define FB_FMAX_VMIN_FREQ_LO_MASK 0xf8000000 #define FB_FMAX_VMIN_FREQ_LO_MASK 0xf8000000
#define VLV_TURBO_SOC_OVERRIDE 0x04
#define VLV_OVERRIDE_EN 1
#define VLV_SOC_TDP_EN (1 << 1)
#define VLV_BIAS_CPU_125_SOC_875 (6 << 2)
#define CHV_BIAS_CPU_50_SOC_50 (3 << 2)
#define VLV_CZ_CLOCK_TO_MILLI_SEC 100000 #define VLV_CZ_CLOCK_TO_MILLI_SEC 100000
/* vlv2 north clock has */ /* vlv2 north clock has */
...@@ -955,6 +961,7 @@ enum skl_disp_power_wells { ...@@ -955,6 +961,7 @@ enum skl_disp_power_wells {
#define _VLV_PCS_DW11_CH0 0x822c #define _VLV_PCS_DW11_CH0 0x822c
#define _VLV_PCS_DW11_CH1 0x842c #define _VLV_PCS_DW11_CH1 0x842c
#define DPIO_TX2_STAGGER_MASK(x) ((x)<<24)
#define DPIO_LANEDESKEW_STRAP_OVRD (1<<3) #define DPIO_LANEDESKEW_STRAP_OVRD (1<<3)
#define DPIO_LEFT_TXFIFO_RST_MASTER (1<<1) #define DPIO_LEFT_TXFIFO_RST_MASTER (1<<1)
#define DPIO_RIGHT_TXFIFO_RST_MASTER (1<<0) #define DPIO_RIGHT_TXFIFO_RST_MASTER (1<<0)
...@@ -967,8 +974,20 @@ enum skl_disp_power_wells { ...@@ -967,8 +974,20 @@ enum skl_disp_power_wells {
#define VLV_PCS01_DW11(ch) _PORT(ch, _VLV_PCS01_DW11_CH0, _VLV_PCS01_DW11_CH1) #define VLV_PCS01_DW11(ch) _PORT(ch, _VLV_PCS01_DW11_CH0, _VLV_PCS01_DW11_CH1)
#define VLV_PCS23_DW11(ch) _PORT(ch, _VLV_PCS23_DW11_CH0, _VLV_PCS23_DW11_CH1) #define VLV_PCS23_DW11(ch) _PORT(ch, _VLV_PCS23_DW11_CH0, _VLV_PCS23_DW11_CH1)
#define _VLV_PCS01_DW12_CH0 0x0230
#define _VLV_PCS23_DW12_CH0 0x0430
#define _VLV_PCS01_DW12_CH1 0x2630
#define _VLV_PCS23_DW12_CH1 0x2830
#define VLV_PCS01_DW12(ch) _PORT(ch, _VLV_PCS01_DW12_CH0, _VLV_PCS01_DW12_CH1)
#define VLV_PCS23_DW12(ch) _PORT(ch, _VLV_PCS23_DW12_CH0, _VLV_PCS23_DW12_CH1)
#define _VLV_PCS_DW12_CH0 0x8230 #define _VLV_PCS_DW12_CH0 0x8230
#define _VLV_PCS_DW12_CH1 0x8430 #define _VLV_PCS_DW12_CH1 0x8430
#define DPIO_TX2_STAGGER_MULT(x) ((x)<<20)
#define DPIO_TX1_STAGGER_MULT(x) ((x)<<16)
#define DPIO_TX1_STAGGER_MASK(x) ((x)<<8)
#define DPIO_LANESTAGGER_STRAP_OVRD (1<<6)
#define DPIO_LANESTAGGER_STRAP(x) ((x)<<0)
#define VLV_PCS_DW12(ch) _PORT(ch, _VLV_PCS_DW12_CH0, _VLV_PCS_DW12_CH1) #define VLV_PCS_DW12(ch) _PORT(ch, _VLV_PCS_DW12_CH0, _VLV_PCS_DW12_CH1)
#define _VLV_PCS_DW14_CH0 0x8238 #define _VLV_PCS_DW14_CH0 0x8238
...@@ -2118,6 +2137,9 @@ enum skl_disp_power_wells { ...@@ -2118,6 +2137,9 @@ enum skl_disp_power_wells {
#define DPIO_PHY_STATUS (VLV_DISPLAY_BASE + 0x6240) #define DPIO_PHY_STATUS (VLV_DISPLAY_BASE + 0x6240)
#define DPLL_PORTD_READY_MASK (0xf) #define DPLL_PORTD_READY_MASK (0xf)
#define DISPLAY_PHY_CONTROL (VLV_DISPLAY_BASE + 0x60100) #define DISPLAY_PHY_CONTROL (VLV_DISPLAY_BASE + 0x60100)
#define PHY_CH_SU_PSR 0x1
#define PHY_CH_DEEP_PSR 0x7
#define PHY_CH_POWER_MODE(mode, phy, ch) ((mode) << (6*(phy)+3*(ch)+2))
#define PHY_COM_LANE_RESET_DEASSERT(phy) (1 << (phy)) #define PHY_COM_LANE_RESET_DEASSERT(phy) (1 << (phy))
#define DISPLAY_PHY_STATUS (VLV_DISPLAY_BASE + 0x60104) #define DISPLAY_PHY_STATUS (VLV_DISPLAY_BASE + 0x60104)
#define PHY_POWERGOOD(phy) (((phy) == DPIO_PHY0) ? (1<<31) : (1<<30)) #define PHY_POWERGOOD(phy) (((phy) == DPIO_PHY0) ? (1<<31) : (1<<30))
...@@ -3480,6 +3502,18 @@ enum skl_disp_power_wells { ...@@ -3480,6 +3502,18 @@ enum skl_disp_power_wells {
#define UTIL_PIN_CTL 0x48400 #define UTIL_PIN_CTL 0x48400
#define UTIL_PIN_ENABLE (1 << 31) #define UTIL_PIN_ENABLE (1 << 31)
/* BXT backlight register definition. */
#define BXT_BLC_PWM_CTL1 0xC8250
#define BXT_BLC_PWM_ENABLE (1 << 31)
#define BXT_BLC_PWM_POLARITY (1 << 29)
#define BXT_BLC_PWM_FREQ1 0xC8254
#define BXT_BLC_PWM_DUTY1 0xC8258
#define BXT_BLC_PWM_CTL2 0xC8350
#define BXT_BLC_PWM_FREQ2 0xC8354
#define BXT_BLC_PWM_DUTY2 0xC8358
#define PCH_GTC_CTL 0xe7000 #define PCH_GTC_CTL 0xe7000
#define PCH_GTC_ENABLE (1 << 31) #define PCH_GTC_ENABLE (1 << 31)
...@@ -5700,7 +5734,7 @@ enum skl_disp_power_wells { ...@@ -5700,7 +5734,7 @@ enum skl_disp_power_wells {
#define HSW_NDE_RSTWRN_OPT 0x46408 #define HSW_NDE_RSTWRN_OPT 0x46408
#define RESET_PCH_HANDSHAKE_ENABLE (1<<4) #define RESET_PCH_HANDSHAKE_ENABLE (1<<4)
#define FF_SLICE_CS_CHICKEN2 0x02e4 #define FF_SLICE_CS_CHICKEN2 0x20e4
#define GEN9_TSG_BARRIER_ACK_DISABLE (1<<8) #define GEN9_TSG_BARRIER_ACK_DISABLE (1<<8)
/* GEN7 chicken */ /* GEN7 chicken */
...@@ -6638,15 +6672,20 @@ enum skl_disp_power_wells { ...@@ -6638,15 +6672,20 @@ enum skl_disp_power_wells {
#define GEN6_PCODE_MAILBOX 0x138124 #define GEN6_PCODE_MAILBOX 0x138124
#define GEN6_PCODE_READY (1<<31) #define GEN6_PCODE_READY (1<<31)
#define GEN6_READ_OC_PARAMS 0xc
#define GEN6_PCODE_WRITE_MIN_FREQ_TABLE 0x8
#define GEN6_PCODE_READ_MIN_FREQ_TABLE 0x9
#define GEN6_PCODE_WRITE_RC6VIDS 0x4 #define GEN6_PCODE_WRITE_RC6VIDS 0x4
#define GEN6_PCODE_READ_RC6VIDS 0x5 #define GEN6_PCODE_READ_RC6VIDS 0x5
#define GEN6_PCODE_READ_D_COMP 0x10
#define GEN6_PCODE_WRITE_D_COMP 0x11
#define GEN6_ENCODE_RC6_VID(mv) (((mv) - 245) / 5) #define GEN6_ENCODE_RC6_VID(mv) (((mv) - 245) / 5)
#define GEN6_DECODE_RC6_VID(vids) (((vids) * 5) + 245) #define GEN6_DECODE_RC6_VID(vids) (((vids) * 5) + 245)
#define GEN9_PCODE_READ_MEM_LATENCY 0x6
#define GEN9_MEM_LATENCY_LEVEL_MASK 0xFF
#define GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT 8
#define GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT 16
#define GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT 24
#define GEN6_PCODE_WRITE_MIN_FREQ_TABLE 0x8
#define GEN6_PCODE_READ_MIN_FREQ_TABLE 0x9
#define GEN6_READ_OC_PARAMS 0xc
#define GEN6_PCODE_READ_D_COMP 0x10
#define GEN6_PCODE_WRITE_D_COMP 0x11
#define HSW_PCODE_DE_WRITE_FREQ_REQ 0x17 #define HSW_PCODE_DE_WRITE_FREQ_REQ 0x17
#define DISPLAY_IPS_CONTROL 0x19 #define DISPLAY_IPS_CONTROL 0x19
#define HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL 0x1A #define HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL 0x1A
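For reference, a self-contained sketch of how the four GEN9 memory latency levels can be unpacked from one 32-bit pcode mailbox word using the mask/shift values above; the sample readback value is invented and the loop is not the driver's actual code.

#include <stdio.h>

#define GEN9_MEM_LATENCY_LEVEL_MASK		0xFF
#define GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT	8
#define GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT	16
#define GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT	24

int main(void)
{
	unsigned int data = 0x1a120c06;	/* hypothetical GEN9_PCODE_READ_MEM_LATENCY result */
	unsigned int level[4];
	int i;

	level[0] = data & GEN9_MEM_LATENCY_LEVEL_MASK;
	level[1] = (data >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) & GEN9_MEM_LATENCY_LEVEL_MASK;
	level[2] = (data >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) & GEN9_MEM_LATENCY_LEVEL_MASK;
	level[3] = (data >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) & GEN9_MEM_LATENCY_LEVEL_MASK;

	for (i = 0; i < 4; i++)
		printf("memory latency level %d: %u\n", i, level[i]);
	return 0;
}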
...@@ -6655,12 +6694,6 @@ enum skl_disp_power_wells { ...@@ -6655,12 +6694,6 @@ enum skl_disp_power_wells {
#define GEN6_PCODE_FREQ_RING_RATIO_SHIFT 16 #define GEN6_PCODE_FREQ_RING_RATIO_SHIFT 16
#define GEN6_PCODE_DATA1 0x13812C #define GEN6_PCODE_DATA1 0x13812C
#define GEN9_PCODE_READ_MEM_LATENCY 0x6
#define GEN9_MEM_LATENCY_LEVEL_MASK 0xFF
#define GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT 8
#define GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT 16
#define GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT 24
#define GEN6_GT_CORE_STATUS 0x138060 #define GEN6_GT_CORE_STATUS 0x138060
#define GEN6_CORE_CPD_STATE_MASK (7<<4) #define GEN6_CORE_CPD_STATE_MASK (7<<4)
#define GEN6_RCn_MASK 7 #define GEN6_RCn_MASK 7
...@@ -6721,6 +6754,7 @@ enum skl_disp_power_wells { ...@@ -6721,6 +6754,7 @@ enum skl_disp_power_wells {
#define GEN7_HALF_SLICE_CHICKEN1_GT2 0xf100 #define GEN7_HALF_SLICE_CHICKEN1_GT2 0xf100
#define GEN7_MAX_PS_THREAD_DEP (8<<12) #define GEN7_MAX_PS_THREAD_DEP (8<<12)
#define GEN7_SINGLE_SUBSCAN_DISPATCH_ENABLE (1<<10) #define GEN7_SINGLE_SUBSCAN_DISPATCH_ENABLE (1<<10)
#define GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE (1<<4)
#define GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE (1<<3) #define GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE (1<<3)
#define GEN9_HALF_SLICE_CHICKEN5 0xe188 #define GEN9_HALF_SLICE_CHICKEN5 0xe188
...@@ -7135,16 +7169,16 @@ enum skl_disp_power_wells { ...@@ -7135,16 +7169,16 @@ enum skl_disp_power_wells {
#define DPLL_CTRL1 0x6C058 #define DPLL_CTRL1 0x6C058
#define DPLL_CTRL1_HDMI_MODE(id) (1<<((id)*6+5)) #define DPLL_CTRL1_HDMI_MODE(id) (1<<((id)*6+5))
#define DPLL_CTRL1_SSC(id) (1<<((id)*6+4)) #define DPLL_CTRL1_SSC(id) (1<<((id)*6+4))
#define DPLL_CRTL1_LINK_RATE_MASK(id) (7<<((id)*6+1)) #define DPLL_CTRL1_LINK_RATE_MASK(id) (7<<((id)*6+1))
#define DPLL_CRTL1_LINK_RATE_SHIFT(id) ((id)*6+1) #define DPLL_CTRL1_LINK_RATE_SHIFT(id) ((id)*6+1)
#define DPLL_CRTL1_LINK_RATE(linkrate, id) ((linkrate)<<((id)*6+1)) #define DPLL_CTRL1_LINK_RATE(linkrate, id) ((linkrate)<<((id)*6+1))
#define DPLL_CTRL1_OVERRIDE(id) (1<<((id)*6)) #define DPLL_CTRL1_OVERRIDE(id) (1<<((id)*6))
#define DPLL_CRTL1_LINK_RATE_2700 0 #define DPLL_CTRL1_LINK_RATE_2700 0
#define DPLL_CRTL1_LINK_RATE_1350 1 #define DPLL_CTRL1_LINK_RATE_1350 1
#define DPLL_CRTL1_LINK_RATE_810 2 #define DPLL_CTRL1_LINK_RATE_810 2
#define DPLL_CRTL1_LINK_RATE_1620 3 #define DPLL_CTRL1_LINK_RATE_1620 3
#define DPLL_CRTL1_LINK_RATE_1080 4 #define DPLL_CTRL1_LINK_RATE_1080 4
#define DPLL_CRTL1_LINK_RATE_2160 5 #define DPLL_CTRL1_LINK_RATE_2160 5
/* DPLL control2 */ /* DPLL control2 */
#define DPLL_CTRL2 0x6C05C #define DPLL_CTRL2 0x6C05C
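The renamed DPLL_CTRL1 macros above pack a 6-bit field per DPLL id (override bit plus link rate). A standalone sketch of programming a link rate, mirroring the read-modify-write done in the DDI hunks further down; the DPLL index and starting value are assumptions.

#include <stdio.h>

#define DPLL_CTRL1_LINK_RATE_MASK(id)		(7<<((id)*6+1))
#define DPLL_CTRL1_LINK_RATE(linkrate, id)	((linkrate)<<((id)*6+1))
#define DPLL_CTRL1_OVERRIDE(id)			(1<<((id)*6))
#define DPLL_CTRL1_LINK_RATE_2700		0
#define DPLL_CTRL1_LINK_RATE_1350		1

int main(void)
{
	int id = 1;	/* hypothetical DPLL index */
	unsigned int val = DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, id);

	/* Clear the old rate field, then select the 2700 rate (used for
	 * DP 5.4 Gbps in skl_ddi_pll_select below) and set the override bit. */
	val &= ~DPLL_CTRL1_LINK_RATE_MASK(id);
	val |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, id) |
	       DPLL_CTRL1_OVERRIDE(id);

	printf("DPLL_CTRL1 = 0x%08x\n", val);	/* 0x00000040 */
	return 0;
}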
...@@ -7204,6 +7238,17 @@ enum skl_disp_power_wells { ...@@ -7204,6 +7238,17 @@ enum skl_disp_power_wells {
#define DC_STATE_EN_UPTO_DC5 (1<<0) #define DC_STATE_EN_UPTO_DC5 (1<<0)
#define DC_STATE_EN_DC9 (1<<3) #define DC_STATE_EN_DC9 (1<<3)
/*
* SKL DC
*/
#define DC_STATE_EN 0x45504
#define DC_STATE_EN_UPTO_DC5 (1<<0)
#define DC_STATE_EN_UPTO_DC6 (2<<0)
#define DC_STATE_EN_UPTO_DC5_DC6_MASK 0x3
#define DC_STATE_DEBUG 0x45520
#define DC_STATE_DEBUG_MASK_MEMORY_UP (1<<1)
/* Please see hsw_read_dcomp() and hsw_write_dcomp() before using this register, /* Please see hsw_read_dcomp() and hsw_write_dcomp() before using this register,
* since on HSW we can't write to it using I915_WRITE. */ * since on HSW we can't write to it using I915_WRITE. */
#define D_COMP_HSW (MCHBAR_MIRROR_BASE_SNB + 0x5F0C) #define D_COMP_HSW (MCHBAR_MIRROR_BASE_SNB + 0x5F0C)
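A short, self-contained illustration of the new SKL DC state interface: the two-bit DC5/DC6 field in DC_STATE_EN is cleared with the mask and then set to the maximum allowed state. Register access is omitted; only the bit arithmetic from the defines above is shown, and the helper name is invented.

#include <stdio.h>

#define DC_STATE_EN_UPTO_DC5		(1<<0)
#define DC_STATE_EN_UPTO_DC6		(2<<0)
#define DC_STATE_EN_UPTO_DC5_DC6_MASK	0x3

static unsigned int allow_up_to_dc6(unsigned int dc_state_en)
{
	dc_state_en &= ~DC_STATE_EN_UPTO_DC5_DC6_MASK;	/* drop the old setting */
	dc_state_en |= DC_STATE_EN_UPTO_DC6;		/* allow DC5 and DC6 */
	return dc_state_en;
}

int main(void)
{
	printf("0x%08x\n", allow_up_to_dc6(DC_STATE_EN_UPTO_DC5));	/* 0x00000002 */
	return 0;
}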
......
...@@ -169,7 +169,7 @@ int intel_atomic_commit(struct drm_device *dev, ...@@ -169,7 +169,7 @@ int intel_atomic_commit(struct drm_device *dev,
plane->state->state = NULL; plane->state->state = NULL;
} }
/* swap crtc_state */ /* swap crtc_scaler_state */
for (i = 0; i < dev->mode_config.num_crtc; i++) { for (i = 0; i < dev->mode_config.num_crtc; i++) {
struct drm_crtc *crtc = state->crtcs[i]; struct drm_crtc *crtc = state->crtcs[i];
if (!crtc) { if (!crtc) {
...@@ -178,6 +178,9 @@ int intel_atomic_commit(struct drm_device *dev, ...@@ -178,6 +178,9 @@ int intel_atomic_commit(struct drm_device *dev,
to_intel_crtc(crtc)->config->scaler_state = to_intel_crtc(crtc)->config->scaler_state =
to_intel_crtc_state(state->crtc_states[i])->scaler_state; to_intel_crtc_state(state->crtc_states[i])->scaler_state;
if (INTEL_INFO(dev)->gen >= 9)
skl_detach_scalers(to_intel_crtc(crtc));
} }
drm_atomic_helper_commit_planes(dev, state); drm_atomic_helper_commit_planes(dev, state);
...@@ -247,7 +250,11 @@ intel_crtc_duplicate_state(struct drm_crtc *crtc) ...@@ -247,7 +250,11 @@ intel_crtc_duplicate_state(struct drm_crtc *crtc)
crtc_state = kmemdup(intel_crtc->config, crtc_state = kmemdup(intel_crtc->config,
sizeof(*intel_crtc->config), GFP_KERNEL); sizeof(*intel_crtc->config), GFP_KERNEL);
if (crtc_state) if (!crtc_state)
return NULL;
__drm_atomic_helper_crtc_duplicate_state(crtc, &crtc_state->base);
crtc_state->base.crtc = crtc; crtc_state->base.crtc = crtc;
return &crtc_state->base; return &crtc_state->base;
......
...@@ -85,8 +85,8 @@ intel_plane_duplicate_state(struct drm_plane *plane) ...@@ -85,8 +85,8 @@ intel_plane_duplicate_state(struct drm_plane *plane)
return NULL; return NULL;
state = &intel_state->base; state = &intel_state->base;
if (state->fb)
drm_framebuffer_reference(state->fb); __drm_atomic_helper_plane_duplicate_state(plane, state);
return state; return state;
} }
...@@ -111,6 +111,7 @@ static int intel_plane_atomic_check(struct drm_plane *plane, ...@@ -111,6 +111,7 @@ static int intel_plane_atomic_check(struct drm_plane *plane,
{ {
struct drm_crtc *crtc = state->crtc; struct drm_crtc *crtc = state->crtc;
struct intel_crtc *intel_crtc; struct intel_crtc *intel_crtc;
struct intel_crtc_state *crtc_state;
struct intel_plane *intel_plane = to_intel_plane(plane); struct intel_plane *intel_plane = to_intel_plane(plane);
struct intel_plane_state *intel_state = to_intel_plane_state(state); struct intel_plane_state *intel_state = to_intel_plane_state(state);
...@@ -126,6 +127,17 @@ static int intel_plane_atomic_check(struct drm_plane *plane, ...@@ -126,6 +127,17 @@ static int intel_plane_atomic_check(struct drm_plane *plane,
if (!crtc) if (!crtc)
return 0; return 0;
/* FIXME: temporary hack necessary while we still use the plane update
* helper. */
if (state->state) {
crtc_state =
intel_atomic_get_crtc_state(state->state, intel_crtc);
if (IS_ERR(crtc_state))
return PTR_ERR(crtc_state);
} else {
crtc_state = intel_crtc->config;
}
/* /*
* The original src/dest coordinates are stored in state->base, but * The original src/dest coordinates are stored in state->base, but
* we want to keep another copy internal to our driver that we can * we want to keep another copy internal to our driver that we can
...@@ -144,9 +156,9 @@ static int intel_plane_atomic_check(struct drm_plane *plane, ...@@ -144,9 +156,9 @@ static int intel_plane_atomic_check(struct drm_plane *plane,
intel_state->clip.x1 = 0; intel_state->clip.x1 = 0;
intel_state->clip.y1 = 0; intel_state->clip.y1 = 0;
intel_state->clip.x2 = intel_state->clip.x2 =
intel_crtc->active ? intel_crtc->config->pipe_src_w : 0; crtc_state->base.active ? crtc_state->pipe_src_w : 0;
intel_state->clip.y2 = intel_state->clip.y2 =
intel_crtc->active ? intel_crtc->config->pipe_src_h : 0; crtc_state->base.active ? crtc_state->pipe_src_h : 0;
/* /*
* Disabling a plane is always okay; we just need to update * Disabling a plane is always okay; we just need to update
......
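A simplified, standalone sketch of the clip computation in the intel_plane_atomic_check() hunk above: with the plane check now reading the CRTC state from the atomic transaction, an inactive CRTC yields an empty clip rectangle and the plane ends up fully clipped. The struct and helper here are illustrative only.

#include <stdbool.h>
#include <stdio.h>

struct clip_rect { int x1, y1, x2, y2; };

static struct clip_rect plane_clip(bool crtc_active, int pipe_src_w, int pipe_src_h)
{
	struct clip_rect c = { 0, 0, 0, 0 };

	c.x2 = crtc_active ? pipe_src_w : 0;
	c.y2 = crtc_active ? pipe_src_h : 0;
	return c;
}

int main(void)
{
	struct clip_rect c = plane_clip(true, 1920, 1080);

	printf("active:   %dx%d\n", c.x2 - c.x1, c.y2 - c.y1);	/* 1920x1080 */
	c = plane_clip(false, 1920, 1080);
	printf("inactive: %dx%d\n", c.x2 - c.x1, c.y2 - c.y1);	/* 0x0 */
	return 0;
}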
...@@ -269,6 +269,9 @@ static void ilk_audio_codec_disable(struct intel_encoder *encoder) ...@@ -269,6 +269,9 @@ static void ilk_audio_codec_disable(struct intel_encoder *encoder)
DRM_DEBUG_KMS("Disable audio codec on port %c, pipe %c\n", DRM_DEBUG_KMS("Disable audio codec on port %c, pipe %c\n",
port_name(port), pipe_name(pipe)); port_name(port), pipe_name(pipe));
if (WARN_ON(port == PORT_A))
return;
if (HAS_PCH_IBX(dev_priv->dev)) { if (HAS_PCH_IBX(dev_priv->dev)) {
aud_config = IBX_AUD_CFG(pipe); aud_config = IBX_AUD_CFG(pipe);
aud_cntrl_st2 = IBX_AUD_CNTL_ST2; aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
...@@ -290,12 +293,7 @@ static void ilk_audio_codec_disable(struct intel_encoder *encoder) ...@@ -290,12 +293,7 @@ static void ilk_audio_codec_disable(struct intel_encoder *encoder)
tmp |= AUD_CONFIG_N_VALUE_INDEX; tmp |= AUD_CONFIG_N_VALUE_INDEX;
I915_WRITE(aud_config, tmp); I915_WRITE(aud_config, tmp);
if (WARN_ON(!port)) {
eldv = IBX_ELD_VALID(PORT_B) | IBX_ELD_VALID(PORT_C) |
IBX_ELD_VALID(PORT_D);
} else {
eldv = IBX_ELD_VALID(port); eldv = IBX_ELD_VALID(port);
}
/* Invalidate ELD */ /* Invalidate ELD */
tmp = I915_READ(aud_cntrl_st2); tmp = I915_READ(aud_cntrl_st2);
...@@ -325,6 +323,9 @@ static void ilk_audio_codec_enable(struct drm_connector *connector, ...@@ -325,6 +323,9 @@ static void ilk_audio_codec_enable(struct drm_connector *connector,
DRM_DEBUG_KMS("Enable audio codec on port %c, pipe %c, %u bytes ELD\n", DRM_DEBUG_KMS("Enable audio codec on port %c, pipe %c, %u bytes ELD\n",
port_name(port), pipe_name(pipe), drm_eld_size(eld)); port_name(port), pipe_name(pipe), drm_eld_size(eld));
if (WARN_ON(port == PORT_A))
return;
/* /*
* FIXME: We're supposed to wait for vblank here, but we have vblanks * FIXME: We're supposed to wait for vblank here, but we have vblanks
* disabled during the mode set. The proper fix would be to push the * disabled during the mode set. The proper fix would be to push the
...@@ -349,12 +350,7 @@ static void ilk_audio_codec_enable(struct drm_connector *connector, ...@@ -349,12 +350,7 @@ static void ilk_audio_codec_enable(struct drm_connector *connector,
aud_cntrl_st2 = CPT_AUD_CNTRL_ST2; aud_cntrl_st2 = CPT_AUD_CNTRL_ST2;
} }
if (WARN_ON(!port)) {
eldv = IBX_ELD_VALID(PORT_B) | IBX_ELD_VALID(PORT_C) |
IBX_ELD_VALID(PORT_D);
} else {
eldv = IBX_ELD_VALID(port); eldv = IBX_ELD_VALID(port);
}
/* Invalidate ELD */ /* Invalidate ELD */
tmp = I915_READ(aud_cntrl_st2); tmp = I915_READ(aud_cntrl_st2);
......
...@@ -672,8 +672,13 @@ parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb) ...@@ -672,8 +672,13 @@ parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
if (bdb->version >= 173) { if (bdb->version >= 173) {
uint8_t vswing; uint8_t vswing;
/* Don't read from VBT if module parameter has valid value*/
if (i915.edp_vswing) {
dev_priv->edp_low_vswing = i915.edp_vswing == 1;
} else {
vswing = (edp->edp_vswing_preemph >> (panel_type * 4)) & 0xF; vswing = (edp->edp_vswing_preemph >> (panel_type * 4)) & 0xF;
dev_priv->vbt.edp_low_vswing = vswing == 0; dev_priv->edp_low_vswing = vswing == 0;
}
} }
} }
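The parse_edp() change above lets the i915.edp_vswing module parameter take precedence over the VBT field. A standalone sketch of just that decision; the function name and parameters are invented for illustration (1 selects low vswing, 2 the default, 0 defers to VBT).

#include <stdbool.h>
#include <stdio.h>

static bool pick_low_vswing(int edp_vswing_param, unsigned int vbt_vswing)
{
	if (edp_vswing_param)			/* module parameter wins */
		return edp_vswing_param == 1;
	return vbt_vswing == 0;			/* otherwise use the VBT value */
}

int main(void)
{
	printf("%d\n", pick_low_vswing(0, 0));	/* VBT requests low vswing -> 1 */
	printf("%d\n", pick_low_vswing(2, 0));	/* parameter forces default -> 0 */
	return 0;
}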
......
...@@ -282,7 +282,7 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port, ...@@ -282,7 +282,7 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port,
ddi_translations_fdi = NULL; ddi_translations_fdi = NULL;
ddi_translations_dp = skl_ddi_translations_dp; ddi_translations_dp = skl_ddi_translations_dp;
n_dp_entries = ARRAY_SIZE(skl_ddi_translations_dp); n_dp_entries = ARRAY_SIZE(skl_ddi_translations_dp);
if (dev_priv->vbt.edp_low_vswing) { if (dev_priv->edp_low_vswing) {
ddi_translations_edp = skl_ddi_translations_edp; ddi_translations_edp = skl_ddi_translations_edp;
n_edp_entries = ARRAY_SIZE(skl_ddi_translations_edp); n_edp_entries = ARRAY_SIZE(skl_ddi_translations_edp);
} else { } else {
...@@ -584,17 +584,18 @@ intel_ddi_get_crtc_new_encoder(struct intel_crtc_state *crtc_state) ...@@ -584,17 +584,18 @@ intel_ddi_get_crtc_new_encoder(struct intel_crtc_state *crtc_state)
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct intel_encoder *ret = NULL; struct intel_encoder *ret = NULL;
struct drm_atomic_state *state; struct drm_atomic_state *state;
struct drm_connector *connector;
struct drm_connector_state *connector_state;
int num_encoders = 0; int num_encoders = 0;
int i; int i;
state = crtc_state->base.state; state = crtc_state->base.state;
for (i = 0; i < state->num_connector; i++) { for_each_connector_in_state(state, connector, connector_state, i) {
if (!state->connectors[i] || if (connector_state->crtc != crtc_state->base.crtc)
state->connector_states[i]->crtc != crtc_state->base.crtc)
continue; continue;
ret = to_intel_encoder(state->connector_states[i]->best_encoder); ret = to_intel_encoder(connector_state->best_encoder);
num_encoders++; num_encoders++;
} }
...@@ -870,26 +871,26 @@ static void skl_ddi_clock_get(struct intel_encoder *encoder, ...@@ -870,26 +871,26 @@ static void skl_ddi_clock_get(struct intel_encoder *encoder,
if (dpll_ctl1 & DPLL_CTRL1_HDMI_MODE(dpll)) { if (dpll_ctl1 & DPLL_CTRL1_HDMI_MODE(dpll)) {
link_clock = skl_calc_wrpll_link(dev_priv, dpll); link_clock = skl_calc_wrpll_link(dev_priv, dpll);
} else { } else {
link_clock = dpll_ctl1 & DPLL_CRTL1_LINK_RATE_MASK(dpll); link_clock = dpll_ctl1 & DPLL_CTRL1_LINK_RATE_MASK(dpll);
link_clock >>= DPLL_CRTL1_LINK_RATE_SHIFT(dpll); link_clock >>= DPLL_CTRL1_LINK_RATE_SHIFT(dpll);
switch (link_clock) { switch (link_clock) {
case DPLL_CRTL1_LINK_RATE_810: case DPLL_CTRL1_LINK_RATE_810:
link_clock = 81000; link_clock = 81000;
break; break;
case DPLL_CRTL1_LINK_RATE_1080: case DPLL_CTRL1_LINK_RATE_1080:
link_clock = 108000; link_clock = 108000;
break; break;
case DPLL_CRTL1_LINK_RATE_1350: case DPLL_CTRL1_LINK_RATE_1350:
link_clock = 135000; link_clock = 135000;
break; break;
case DPLL_CRTL1_LINK_RATE_1620: case DPLL_CTRL1_LINK_RATE_1620:
link_clock = 162000; link_clock = 162000;
break; break;
case DPLL_CRTL1_LINK_RATE_2160: case DPLL_CTRL1_LINK_RATE_2160:
link_clock = 216000; link_clock = 216000;
break; break;
case DPLL_CRTL1_LINK_RATE_2700: case DPLL_CTRL1_LINK_RATE_2700:
link_clock = 270000; link_clock = 270000;
break; break;
default: default:
...@@ -1294,13 +1295,13 @@ skl_ddi_pll_select(struct intel_crtc *intel_crtc, ...@@ -1294,13 +1295,13 @@ skl_ddi_pll_select(struct intel_crtc *intel_crtc,
switch (intel_dp->link_bw) { switch (intel_dp->link_bw) {
case DP_LINK_BW_1_62: case DP_LINK_BW_1_62:
ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_810, 0); ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0);
break; break;
case DP_LINK_BW_2_7: case DP_LINK_BW_2_7:
ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1350, 0); ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, 0);
break; break;
case DP_LINK_BW_5_4: case DP_LINK_BW_5_4:
ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_2700, 0); ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, 0);
break; break;
} }
...@@ -1854,7 +1855,7 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder) ...@@ -1854,7 +1855,7 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
val &= ~(DPLL_CTRL1_HDMI_MODE(dpll) | val &= ~(DPLL_CTRL1_HDMI_MODE(dpll) |
DPLL_CTRL1_SSC(dpll) | DPLL_CTRL1_SSC(dpll) |
DPLL_CRTL1_LINK_RATE_MASK(dpll)); DPLL_CTRL1_LINK_RATE_MASK(dpll));
val |= crtc->config->dpll_hw_state.ctrl1 << (dpll * 6); val |= crtc->config->dpll_hw_state.ctrl1 << (dpll * 6);
I915_WRITE(DPLL_CTRL1, val); I915_WRITE(DPLL_CTRL1, val);
...@@ -2100,7 +2101,7 @@ static void skl_ddi_pll_enable(struct drm_i915_private *dev_priv, ...@@ -2100,7 +2101,7 @@ static void skl_ddi_pll_enable(struct drm_i915_private *dev_priv,
val = I915_READ(DPLL_CTRL1); val = I915_READ(DPLL_CTRL1);
val &= ~(DPLL_CTRL1_HDMI_MODE(dpll) | DPLL_CTRL1_SSC(dpll) | val &= ~(DPLL_CTRL1_HDMI_MODE(dpll) | DPLL_CTRL1_SSC(dpll) |
DPLL_CRTL1_LINK_RATE_MASK(dpll)); DPLL_CTRL1_LINK_RATE_MASK(dpll));
val |= pll->config.hw_state.ctrl1 << (dpll * 6); val |= pll->config.hw_state.ctrl1 << (dpll * 6);
I915_WRITE(DPLL_CTRL1, val); I915_WRITE(DPLL_CTRL1, val);
......
...@@ -40,7 +40,9 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder, ...@@ -40,7 +40,9 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
int bpp, i; int bpp, i;
int lane_count, slots, rate; int lane_count, slots, rate;
struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
struct intel_connector *found = NULL; struct drm_connector *drm_connector;
struct intel_connector *connector, *found = NULL;
struct drm_connector_state *connector_state;
int mst_pbn; int mst_pbn;
pipe_config->dp_encoder_is_mst = true; pipe_config->dp_encoder_is_mst = true;
...@@ -70,12 +72,11 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder, ...@@ -70,12 +72,11 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
state = pipe_config->base.state; state = pipe_config->base.state;
for (i = 0; i < state->num_connector; i++) { for_each_connector_in_state(state, drm_connector, connector_state, i) {
if (!state->connectors[i]) connector = to_intel_connector(drm_connector);
continue;
if (state->connector_states[i]->best_encoder == &encoder->base) { if (connector_state->best_encoder == &encoder->base) {
found = to_intel_connector(state->connectors[i]); found = connector;
break; break;
} }
} }
......
...@@ -457,7 +457,7 @@ static struct drm_crtc *intel_fbc_find_crtc(struct drm_i915_private *dev_priv) ...@@ -457,7 +457,7 @@ static struct drm_crtc *intel_fbc_find_crtc(struct drm_i915_private *dev_priv)
tmp_crtc = dev_priv->pipe_to_crtc_mapping[pipe]; tmp_crtc = dev_priv->pipe_to_crtc_mapping[pipe];
if (intel_crtc_active(tmp_crtc) && if (intel_crtc_active(tmp_crtc) &&
to_intel_crtc(tmp_crtc)->primary_enabled) { to_intel_plane_state(tmp_crtc->primary->state)->visible) {
if (one_pipe_only && crtc) { if (one_pipe_only && crtc) {
if (set_no_fbc_reason(dev_priv, FBC_MULTIPLE_PIPES)) if (set_no_fbc_reason(dev_priv, FBC_MULTIPLE_PIPES))
DRM_DEBUG_KMS("more than one pipe active, disabling compression\n"); DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
......
...@@ -1895,10 +1895,9 @@ int intel_lr_context_deferred_create(struct intel_context *ctx, ...@@ -1895,10 +1895,9 @@ int intel_lr_context_deferred_create(struct intel_context *ctx,
context_size = round_up(get_lr_context_size(ring), 4096); context_size = round_up(get_lr_context_size(ring), 4096);
ctx_obj = i915_gem_alloc_object(dev, context_size); ctx_obj = i915_gem_alloc_object(dev, context_size);
if (IS_ERR(ctx_obj)) { if (!ctx_obj) {
ret = PTR_ERR(ctx_obj); DRM_DEBUG_DRIVER("Alloc LRC backing obj failed.\n");
DRM_DEBUG_DRIVER("Alloc LRC backing obj failed: %d\n", ret); return -ENOMEM;
return ret;
} }
if (is_global_default_ctx) { if (is_global_default_ctx) {
......