Commit fffb6751 authored by Dave Airlie

Merge tag 'drm-intel-next-2016-04-25' of git://anongit.freedesktop.org/drm-intel into drm-next

- more userptr cornercase fixes from Chris
- clean up and tune forcewake handling (Tvrtko)
- more underrun fixes from Ville, mostly for ilk to appease CI
- fix unclaimed register warnings on vlv/chv and enable the debug code to catch
  them by default (Ville)
- skl gpu hang fixes for gt3/4 (Mika Kuoppala)
- edram improvements for gen9+ (Mika again)
- clean up gpu reset corner cases (Chris)
- fix ctx/ring machine deaths on snb/ilk (Chris)
- MOCS programming for all engines (Peter Antoine)
- robustify/clean up vlv/chv irq handler (Ville)
- split gen8+ irq handlers into ack/handle phase (Ville)
- tons of bxt rpm fixes (mostly around firmware interactions), from Imre
- hook up panel fitting for dsi panels (Ville)
- more runtime PM fixes all over from Imre
- shrinker polish (Chris)
- more guc fixes from Alex Dai and Dave Gordon
- tons of bugfixes and small polish all over (but with a big focus on bxt)

* tag 'drm-intel-next-2016-04-25' of git://anongit.freedesktop.org/drm-intel: (142 commits)
  drm/i915: Update DRIVER_DATE to 20160425
  drm/i915/bxt: Explicitly clear the Turbo control register
  drm/i915: Correct the i915_frequency_info debugfs output
  drm/i915: Macros to convert PM time interval values to microseconds
  drm/i915: Make RPS EI/thresholds multiple of 25 on SNB-BDW
  drm/i915: Fake HDMI live status
  drm/i915/bxt: Force reprogramming a PHY with invalid HW state
  drm/i915/bxt: Wait for PHY1 GRC done if PHY0 was already enabled
  drm/i915/bxt: Use PHY0 GRC value for HW state verification
  drm/i915: use dev_priv directly in gen8_ppgtt_notify_vgt
  drm/i915/bxt: Enable DC5 during runtime resume
  drm/i915/bxt: Sanitize DC state tracking during system resume
  drm/i915/bxt: Don't uninit/init display core twice during system suspend/resume
  drm/i915: Inline intel_suspend_complete
  drm/i915/kbl: Don't WARN for expected secondary MISC IO power well request
  drm/i915: Fix eDP low vswing for Broadwell
  drm/i915: check for ERR_PTR from i915_gem_object_pin_map()
  drm/i915/guc: local optimisations and updating comments
  drm/i915/guc: drop cached copy of 'wq_head'
  drm/i915/guc: keep GuC doorbell & process descriptor mapped in kernel
  ...
parents b89359bd 5b4fd5b1
config DRM_I915_WERROR
bool "Force GCC to throw an error instead of a warning when compiling"
# As this may inadvertently break the build, only allow the user
# to shoot oneself in the foot iff they aim really hard
depends on EXPERT
# We use the dependency on !COMPILE_TEST to not be enabled in
# allmodconfig or allyesconfig configurations
depends on !COMPILE_TEST
default n
help
Add -Werror to the build flags for (and only for) i915.ko.
Do not enable this unless you are writing code for the i915.ko module.
Recommended for driver developers only.
If in doubt, say "N".
config DRM_I915_DEBUG
bool "Enable additional driver debugging"
depends on DRM_I915
......@@ -10,3 +27,15 @@ config DRM_I915_DEBUG
If in doubt, say "N".
config DRM_I915_DEBUG_GEM
bool "Insert extra checks into the GEM internals"
default n
depends on DRM_I915_WERROR
help
Enable extra sanity checks (including BUGs) along the GEM driver
paths that may slow the system down and if hit hang the machine.
Recommended for driver developers only.
If in doubt, say "N".
......@@ -2,6 +2,8 @@
# Makefile for the drm device driver. This driver provides support for the
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
subdir-ccflags-$(CONFIG_DRM_I915_WERROR) := -Werror
# Please keep these build lists sorted!
# core driver code
......
......@@ -89,27 +89,34 @@ static int i915_capabilities(struct seq_file *m, void *data)
return 0;
}
static const char *get_pin_flag(struct drm_i915_gem_object *obj)
static const char get_active_flag(struct drm_i915_gem_object *obj)
{
if (obj->pin_display)
return "p";
else
return " ";
return obj->active ? '*' : ' ';
}
static const char *get_tiling_flag(struct drm_i915_gem_object *obj)
static const char get_pin_flag(struct drm_i915_gem_object *obj)
{
return obj->pin_display ? 'p' : ' ';
}
static const char get_tiling_flag(struct drm_i915_gem_object *obj)
{
switch (obj->tiling_mode) {
default:
case I915_TILING_NONE: return " ";
case I915_TILING_X: return "X";
case I915_TILING_Y: return "Y";
case I915_TILING_NONE: return ' ';
case I915_TILING_X: return 'X';
case I915_TILING_Y: return 'Y';
}
}
static inline const char *get_global_flag(struct drm_i915_gem_object *obj)
static inline const char get_global_flag(struct drm_i915_gem_object *obj)
{
return i915_gem_obj_to_ggtt(obj) ? "g" : " ";
return i915_gem_obj_to_ggtt(obj) ? 'g' : ' ';
}
static inline const char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
{
return obj->mapping ? 'M' : ' ';
}
static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
......@@ -136,12 +143,13 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
lockdep_assert_held(&obj->base.dev->struct_mutex);
seq_printf(m, "%pK: %s%s%s%s %8zdKiB %02x %02x [ ",
seq_printf(m, "%pK: %c%c%c%c%c %8zdKiB %02x %02x [ ",
&obj->base,
obj->active ? "*" : " ",
get_active_flag(obj),
get_pin_flag(obj),
get_tiling_flag(obj),
get_global_flag(obj),
get_pin_mapped_flag(obj),
obj->base.size / 1024,
obj->base.read_domains,
obj->base.write_domain);
......@@ -435,6 +443,8 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
struct i915_ggtt *ggtt = &dev_priv->ggtt;
u32 count, mappable_count, purgeable_count;
u64 size, mappable_size, purgeable_size;
unsigned long pin_mapped_count = 0, pin_mapped_purgeable_count = 0;
u64 pin_mapped_size = 0, pin_mapped_purgeable_size = 0;
struct drm_i915_gem_object *obj;
struct drm_file *file;
struct i915_vma *vma;
......@@ -468,6 +478,14 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
size += obj->base.size, ++count;
if (obj->madv == I915_MADV_DONTNEED)
purgeable_size += obj->base.size, ++purgeable_count;
if (obj->mapping) {
pin_mapped_count++;
pin_mapped_size += obj->base.size;
if (obj->pages_pin_count == 0) {
pin_mapped_purgeable_count++;
pin_mapped_purgeable_size += obj->base.size;
}
}
}
seq_printf(m, "%u unbound objects, %llu bytes\n", count, size);
......@@ -485,6 +503,14 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
purgeable_size += obj->base.size;
++purgeable_count;
}
if (obj->mapping) {
pin_mapped_count++;
pin_mapped_size += obj->base.size;
if (obj->pages_pin_count == 0) {
pin_mapped_purgeable_count++;
pin_mapped_purgeable_size += obj->base.size;
}
}
}
seq_printf(m, "%u purgeable objects, %llu bytes\n",
purgeable_count, purgeable_size);
......@@ -492,6 +518,10 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
mappable_count, mappable_size);
seq_printf(m, "%u fault mappable objects, %llu bytes\n",
count, size);
seq_printf(m,
"%lu [%lu] pin mapped objects, %llu [%llu] bytes [purgeable]\n",
pin_mapped_count, pin_mapped_purgeable_count,
pin_mapped_size, pin_mapped_purgeable_size);
seq_printf(m, "%llu [%llu] gtt total\n",
ggtt->base.total, ggtt->mappable_end - ggtt->base.start);
......@@ -1216,12 +1246,12 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD);
rpstat = I915_READ(GEN6_RPSTAT1);
rpupei = I915_READ(GEN6_RP_CUR_UP_EI);
rpcurup = I915_READ(GEN6_RP_CUR_UP);
rpprevup = I915_READ(GEN6_RP_PREV_UP);
rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI);
rpcurdown = I915_READ(GEN6_RP_CUR_DOWN);
rpprevdown = I915_READ(GEN6_RP_PREV_DOWN);
rpupei = I915_READ(GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK;
rpcurup = I915_READ(GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK;
rpprevup = I915_READ(GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK;
rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
if (IS_GEN9(dev))
cagf = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT;
else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
......@@ -1261,21 +1291,21 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
seq_printf(m, "CAGF: %dMHz\n", cagf);
seq_printf(m, "RP CUR UP EI: %dus\n", rpupei &
GEN6_CURICONT_MASK);
seq_printf(m, "RP CUR UP: %dus\n", rpcurup &
GEN6_CURBSYTAVG_MASK);
seq_printf(m, "RP PREV UP: %dus\n", rpprevup &
GEN6_CURBSYTAVG_MASK);
seq_printf(m, "RP CUR UP EI: %d (%dus)\n",
rpupei, GT_PM_INTERVAL_TO_US(dev_priv, rpupei));
seq_printf(m, "RP CUR UP: %d (%dus)\n",
rpcurup, GT_PM_INTERVAL_TO_US(dev_priv, rpcurup));
seq_printf(m, "RP PREV UP: %d (%dus)\n",
rpprevup, GT_PM_INTERVAL_TO_US(dev_priv, rpprevup));
seq_printf(m, "Up threshold: %d%%\n",
dev_priv->rps.up_threshold);
seq_printf(m, "RP CUR DOWN EI: %dus\n", rpdownei &
GEN6_CURIAVG_MASK);
seq_printf(m, "RP CUR DOWN: %dus\n", rpcurdown &
GEN6_CURBSYTAVG_MASK);
seq_printf(m, "RP PREV DOWN: %dus\n", rpprevdown &
GEN6_CURBSYTAVG_MASK);
seq_printf(m, "RP CUR DOWN EI: %d (%dus)\n",
rpdownei, GT_PM_INTERVAL_TO_US(dev_priv, rpdownei));
seq_printf(m, "RP CUR DOWN: %d (%dus)\n",
rpcurdown, GT_PM_INTERVAL_TO_US(dev_priv, rpcurdown));
seq_printf(m, "RP PREV DOWN: %d (%dus)\n",
rpprevdown, GT_PM_INTERVAL_TO_US(dev_priv, rpprevdown));
seq_printf(m, "Down threshold: %d%%\n",
dev_priv->rps.down_threshold);
......@@ -1469,12 +1499,11 @@ static int i915_forcewake_domains(struct seq_file *m, void *data)
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_uncore_forcewake_domain *fw_domain;
int i;
spin_lock_irq(&dev_priv->uncore.lock);
for_each_fw_domain(fw_domain, dev_priv, i) {
for_each_fw_domain(fw_domain, dev_priv) {
seq_printf(m, "%s.wake_count = %u\n",
intel_uncore_forcewake_domain_to_str(i),
intel_uncore_forcewake_domain_to_str(fw_domain->id),
fw_domain->wake_count);
}
spin_unlock_irq(&dev_priv->uncore.lock);
......@@ -2405,10 +2434,11 @@ static int i915_llc(struct seq_file *m, void *data)
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
const bool edram = INTEL_GEN(dev_priv) > 8;
/* Size calculation for LLC is a bit of a pain. Ignore for now. */
seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev)));
seq_printf(m, "eLLC: %zuMB\n", dev_priv->ellc_size);
seq_printf(m, "%s: %lluMB\n", edram ? "eDRAM" : "eLLC",
intel_uncore_edram_size(dev_priv)/1024/1024);
return 0;
}
......@@ -4723,7 +4753,7 @@ i915_wedged_get(void *data, u64 *val)
struct drm_device *dev = data;
struct drm_i915_private *dev_priv = dev->dev_private;
*val = atomic_read(&dev_priv->gpu_error.reset_counter);
*val = i915_terminally_wedged(&dev_priv->gpu_error);
return 0;
}
......
......@@ -257,13 +257,6 @@ static int i915_get_bridge_dev(struct drm_device *dev)
return 0;
}
#define MCHBAR_I915 0x44
#define MCHBAR_I965 0x48
#define MCHBAR_SIZE (4*4096)
#define DEVEN_REG 0x54
#define DEVEN_MCHBAR_EN (1 << 28)
/* Allocate space for the MCH regs if needed, return nonzero on error */
static int
intel_alloc_mchbar_resource(struct drm_device *dev)
......@@ -325,7 +318,7 @@ intel_setup_mchbar(struct drm_device *dev)
dev_priv->mchbar_need_disable = false;
if (IS_I915G(dev) || IS_I915GM(dev)) {
pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
pci_read_config_dword(dev_priv->bridge_dev, DEVEN, &temp);
enabled = !!(temp & DEVEN_MCHBAR_EN);
} else {
pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
......@@ -343,7 +336,7 @@ intel_setup_mchbar(struct drm_device *dev)
/* Space is allocated or reserved, so enable it. */
if (IS_I915G(dev) || IS_I915GM(dev)) {
pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG,
pci_write_config_dword(dev_priv->bridge_dev, DEVEN,
temp | DEVEN_MCHBAR_EN);
} else {
pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
......@@ -356,17 +349,24 @@ intel_teardown_mchbar(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
u32 temp;
if (dev_priv->mchbar_need_disable) {
if (IS_I915G(dev) || IS_I915GM(dev)) {
pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
temp &= ~DEVEN_MCHBAR_EN;
pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG, temp);
u32 deven_val;
pci_read_config_dword(dev_priv->bridge_dev, DEVEN,
&deven_val);
deven_val &= ~DEVEN_MCHBAR_EN;
pci_write_config_dword(dev_priv->bridge_dev, DEVEN,
deven_val);
} else {
pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
temp &= ~1;
pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp);
u32 mchbar_val;
pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg,
&mchbar_val);
mchbar_val &= ~1;
pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg,
mchbar_val);
}
}
......
......
/*
* Copyright © 2016 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
*/
#ifndef __I915_GEM_H__
#define __I915_GEM_H__
#ifdef CONFIG_DRM_I915_DEBUG_GEM
#define GEM_BUG_ON(expr) BUG_ON(expr)
#else
#define GEM_BUG_ON(expr)
#endif
#endif /* __I915_GEM_H__ */
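A hypothetical usage sketch (not part of the patch) of the new GEM_BUG_ON(): with CONFIG_DRM_I915_DEBUG_GEM=y it BUGs when the invariant is violated, and otherwise it compiles away completely, so it is safe on hot paths.
#include "i915_gem.h"
/* Hypothetical caller, for illustration only: assert a GEM invariant
 * that would be too expensive to check in production builds. */
static void example_use_pages(struct drm_i915_gem_object *obj)
{
	/* pages must be pinned before the backing store is touched */
	GEM_BUG_ON(obj->pages_pin_count == 0);
	/* ... */
}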
......@@ -1137,7 +1137,7 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
}
}
void
static void
i915_gem_execbuffer_retire_commands(struct i915_execbuffer_params *params)
{
/* Unconditionally force add_request to emit a full flush. */
......@@ -1322,7 +1322,6 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
trace_i915_gem_ring_dispatch(params->request, params->dispatch_flags);
i915_gem_execbuffer_move_to_active(vmas, params->request);
i915_gem_execbuffer_retire_commands(params);
return 0;
}
......@@ -1624,7 +1623,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
ret = i915_gem_request_add_to_client(req, file);
if (ret)
goto err_batch_unpin;
goto err_request;
/*
* Save assorted stuff away to pass through to *_submission().
......@@ -1641,6 +1640,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
params->request = req;
ret = dev_priv->gt.execbuf_submit(params, args, &eb->vmas);
err_request:
i915_gem_execbuffer_retire_commands(params);
err_batch_unpin:
/*
......@@ -1657,14 +1658,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
i915_gem_context_unreference(ctx);
eb_destroy(eb);
/*
* If the request was created but not successfully submitted then it
* must be freed again. If it was submitted then it is being tracked
* on the active request list and no clean up is required here.
*/
if (ret && !IS_ERR_OR_NULL(req))
i915_gem_request_cancel(req);
mutex_unlock(&dev->struct_mutex);
pre_mutex_err:
......
......@@ -745,7 +745,7 @@ static void gen8_ppgtt_clear_pte_range(struct i915_address_space *vm,
num_entries--;
}
kunmap_px(ppgtt, pt);
kunmap_px(ppgtt, pt_vaddr);
pte = 0;
if (++pde == I915_PDES) {
......@@ -905,11 +905,10 @@ static int gen8_init_scratch(struct i915_address_space *vm)
static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create)
{
enum vgt_g2v_type msg;
struct drm_device *dev = ppgtt->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = to_i915(ppgtt->base.dev);
int i;
if (USES_FULL_48BIT_PPGTT(dev)) {
if (USES_FULL_48BIT_PPGTT(dev_priv)) {
u64 daddr = px_dma(&ppgtt->pml4);
I915_WRITE(vgtif_reg(pdp[0].lo), lower_32_bits(daddr));
......@@ -3172,7 +3171,8 @@ int i915_ggtt_init_hw(struct drm_device *dev)
} else if (INTEL_INFO(dev)->gen < 8) {
ggtt->probe = gen6_gmch_probe;
ggtt->base.cleanup = gen6_gmch_remove;
if (IS_HASWELL(dev) && dev_priv->ellc_size)
if (HAS_EDRAM(dev))
ggtt->base.pte_encode = iris_pte_encode;
else if (IS_HASWELL(dev))
ggtt->base.pte_encode = hsw_pte_encode;
......
......@@ -70,6 +70,10 @@ static bool swap_available(void)
static bool can_release_pages(struct drm_i915_gem_object *obj)
{
/* Only shmemfs objects are backed by swap */
if (!obj->base.filp)
return false;
/* Only report true if by unbinding the object and putting its pages
* we can actually make forward progress towards freeing physical
* pages.
......@@ -336,7 +340,7 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
container_of(nb, struct drm_i915_private, mm.oom_notifier);
struct shrinker_lock_uninterruptible slu;
struct drm_i915_gem_object *obj;
unsigned long pinned, bound, unbound, freed_pages;
unsigned long unevictable, bound, unbound, freed_pages;
if (!i915_gem_shrinker_lock_uninterruptible(dev_priv, &slu, 5000))
return NOTIFY_DONE;
......@@ -347,33 +351,28 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
* assert that there are no objects with pinned pages that are not
* being pointed to by hardware.
*/
unbound = bound = pinned = 0;
unbound = bound = unevictable = 0;
list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
if (!obj->base.filp) /* not backed by a freeable object */
continue;
if (obj->pages_pin_count)
pinned += obj->base.size;
if (!can_release_pages(obj))
unevictable += obj->base.size >> PAGE_SHIFT;
else
unbound += obj->base.size;
unbound += obj->base.size >> PAGE_SHIFT;
}
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
if (!obj->base.filp)
continue;
if (obj->pages_pin_count)
pinned += obj->base.size;
if (!can_release_pages(obj))
unevictable += obj->base.size >> PAGE_SHIFT;
else
bound += obj->base.size;
bound += obj->base.size >> PAGE_SHIFT;
}
i915_gem_shrinker_unlock_uninterruptible(dev_priv, &slu);
if (freed_pages || unbound || bound)
pr_info("Purging GPU memory, %lu bytes freed, %lu bytes still pinned.\n",
freed_pages << PAGE_SHIFT, pinned);
pr_info("Purging GPU memory, %lu pages freed, "
"%lu pages still pinned.\n",
freed_pages, unevictable);
if (unbound || bound)
pr_err("%lu and %lu bytes still available in the "
pr_err("%lu and %lu pages still available in the "
"bound and unbound GPU page lists.\n",
bound, unbound);
......
......@@ -95,9 +95,9 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
u32 base;
/* Almost universally we can find the Graphics Base of Stolen Memory
* at offset 0x5c in the igfx configuration space. On a few (desktop)
* machines this is also mirrored in the bridge device at different
* locations, or in the MCHBAR.
* at register BSM (0x5c) in the igfx configuration space. On a few
* (desktop) machines this is also mirrored in the bridge device at
* different locations, or in the MCHBAR.
*
* On 865 we just check the TOUD register.
*
......@@ -107,9 +107,11 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
*/
base = 0;
if (INTEL_INFO(dev)->gen >= 3) {
/* Read Graphics Base of Stolen Memory directly */
pci_read_config_dword(dev->pdev, 0x5c, &base);
base &= ~((1<<20) - 1);
u32 bsm;
pci_read_config_dword(dev->pdev, BSM, &bsm);
base = bsm & BSM_MASK;
} else if (IS_I865G(dev)) {
u16 toud = 0;
......
......@@ -34,7 +34,7 @@
struct i915_mm_struct {
struct mm_struct *mm;
struct drm_device *dev;
struct drm_i915_private *i915;
struct i915_mmu_notifier *mn;
struct hlist_node node;
struct kref kref;
......@@ -49,6 +49,7 @@ struct i915_mmu_notifier {
struct hlist_node node;
struct mmu_notifier mn;
struct rb_root objects;
struct workqueue_struct *wq;
};
struct i915_mmu_object {
......@@ -60,6 +61,37 @@ struct i915_mmu_object {
bool attached;
};
static void wait_rendering(struct drm_i915_gem_object *obj)
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_gem_request *requests[I915_NUM_ENGINES];
int i, n;
if (!obj->active)
return;
n = 0;
for (i = 0; i < I915_NUM_ENGINES; i++) {
struct drm_i915_gem_request *req;
req = obj->last_read_req[i];
if (req == NULL)
continue;
requests[n++] = i915_gem_request_reference(req);
}
mutex_unlock(&dev->struct_mutex);
for (i = 0; i < n; i++)
__i915_wait_request(requests[i], false, NULL, NULL);
mutex_lock(&dev->struct_mutex);
for (i = 0; i < n; i++)
i915_gem_request_unreference(requests[i]);
}
static void cancel_userptr(struct work_struct *work)
{
struct i915_mmu_object *mo = container_of(work, typeof(*mo), work);
......@@ -75,13 +107,13 @@ static void cancel_userptr(struct work_struct *work)
struct i915_vma *vma, *tmp;
bool was_interruptible;
wait_rendering(obj);
was_interruptible = dev_priv->mm.interruptible;
dev_priv->mm.interruptible = false;
list_for_each_entry_safe(vma, tmp, &obj->vma_list, obj_link) {
int ret = i915_vma_unbind(vma);
WARN_ON(ret && ret != -EIO);
}
list_for_each_entry_safe(vma, tmp, &obj->vma_list, obj_link)
WARN_ON(i915_vma_unbind(vma));
WARN_ON(i915_gem_object_put_pages(obj));
dev_priv->mm.interruptible = was_interruptible;
......@@ -140,7 +172,7 @@ static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
*/
mo = container_of(it, struct i915_mmu_object, it);
if (kref_get_unless_zero(&mo->obj->base.refcount))
schedule_work(&mo->work);
queue_work(mn->wq, &mo->work);
list_add(&mo->link, &cancelled);
it = interval_tree_iter_next(it, start, end);
......@@ -148,6 +180,8 @@ static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
list_for_each_entry(mo, &cancelled, link)
del_object(mo);
spin_unlock(&mn->lock);
flush_workqueue(mn->wq);
}
static const struct mmu_notifier_ops i915_gem_userptr_notifier = {
......@@ -167,10 +201,16 @@ i915_mmu_notifier_create(struct mm_struct *mm)
spin_lock_init(&mn->lock);
mn->mn.ops = &i915_gem_userptr_notifier;
mn->objects = RB_ROOT;
mn->wq = alloc_workqueue("i915-userptr-release", WQ_UNBOUND, 0);
if (mn->wq == NULL) {
kfree(mn);
return ERR_PTR(-ENOMEM);
}
/* Protected by mmap_sem (write-lock) */
ret = __mmu_notifier_register(&mn->mn, mm);
if (ret) {
destroy_workqueue(mn->wq);
kfree(mn);
return ERR_PTR(ret);
}
......@@ -205,13 +245,13 @@ i915_mmu_notifier_find(struct i915_mm_struct *mm)
return mn;
down_write(&mm->mm->mmap_sem);
mutex_lock(&to_i915(mm->dev)->mm_lock);
mutex_lock(&mm->i915->mm_lock);
if ((mn = mm->mn) == NULL) {
mn = i915_mmu_notifier_create(mm->mm);
if (!IS_ERR(mn))
mm->mn = mn;
}
mutex_unlock(&to_i915(mm->dev)->mm_lock);
mutex_unlock(&mm->i915->mm_lock);
up_write(&mm->mm->mmap_sem);
return mn;
......@@ -256,6 +296,7 @@ i915_mmu_notifier_free(struct i915_mmu_notifier *mn,
return;
mmu_notifier_unregister(&mn->mn, mm);
destroy_workqueue(mn->wq);
kfree(mn);
}
......@@ -327,7 +368,7 @@ i915_gem_userptr_init__mm_struct(struct drm_i915_gem_object *obj)
}
kref_init(&mm->kref);
mm->dev = obj->base.dev;
mm->i915 = to_i915(obj->base.dev);
mm->mm = current->mm;
atomic_inc(&current->mm->mm_count);
......@@ -362,7 +403,7 @@ __i915_mm_struct_free(struct kref *kref)
/* Protected by dev_priv->mm_lock */
hash_del(&mm->node);
mutex_unlock(&to_i915(mm->dev)->mm_lock);
mutex_unlock(&mm->i915->mm_lock);
INIT_WORK(&mm->work, __i915_mm_struct_free__worker);
schedule_work(&mm->work);
......@@ -498,19 +539,24 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
if (pvec != NULL) {
struct mm_struct *mm = obj->userptr.mm->mm;
down_read(&mm->mmap_sem);
while (pinned < npages) {
ret = get_user_pages_remote(work->task, mm,
obj->userptr.ptr + pinned * PAGE_SIZE,
npages - pinned,
!obj->userptr.read_only, 0,
pvec + pinned, NULL);
if (ret < 0)
break;
pinned += ret;
ret = -EFAULT;
if (atomic_inc_not_zero(&mm->mm_users)) {
down_read(&mm->mmap_sem);
while (pinned < npages) {
ret = get_user_pages_remote
(work->task, mm,
obj->userptr.ptr + pinned * PAGE_SIZE,
npages - pinned,
!obj->userptr.read_only, 0,
pvec + pinned, NULL);
if (ret < 0)
break;
pinned += ret;
}
up_read(&mm->mmap_sem);
mmput(mm);
}
up_read(&mm->mmap_sem);
}
mutex_lock(&dev->struct_mutex);
......
......@@ -179,15 +179,11 @@ static void guc_init_doorbell(struct intel_guc *guc,
struct i915_guc_client *client)
{
struct guc_doorbell_info *doorbell;
void *base;
base = kmap_atomic(i915_gem_object_get_page(client->client_obj, 0));
doorbell = base + client->doorbell_offset;
doorbell = client->client_base + client->doorbell_offset;
doorbell->db_status = 1;
doorbell->db_status = GUC_DOORBELL_ENABLED;
doorbell->cookie = 0;
kunmap_atomic(base);
}
static int guc_ring_doorbell(struct i915_guc_client *gc)
......@@ -195,11 +191,9 @@ static int guc_ring_doorbell(struct i915_guc_client *gc)
struct guc_process_desc *desc;
union guc_doorbell_qw db_cmp, db_exc, db_ret;
union guc_doorbell_qw *db;
void *base;
int attempt = 2, ret = -EAGAIN;
base = kmap_atomic(i915_gem_object_get_page(gc->client_obj, 0));
desc = base + gc->proc_desc_offset;
desc = gc->client_base + gc->proc_desc_offset;
/* Update the tail so it is visible to GuC */
desc->tail = gc->wq_tail;
......@@ -215,7 +209,7 @@ static int guc_ring_doorbell(struct i915_guc_client *gc)
db_exc.cookie = 1;
/* pointer of current doorbell cacheline */
db = base + gc->doorbell_offset;
db = gc->client_base + gc->doorbell_offset;
while (attempt--) {
/* lets ring the doorbell */
......@@ -244,10 +238,6 @@ static int guc_ring_doorbell(struct i915_guc_client *gc)
db_exc.cookie = 1;
}
/* Finally, update the cached copy of the GuC's WQ head */
gc->wq_head = desc->head;
kunmap_atomic(base);
return ret;
}
......@@ -256,16 +246,12 @@ static void guc_disable_doorbell(struct intel_guc *guc,
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
struct guc_doorbell_info *doorbell;
void *base;
i915_reg_t drbreg = GEN8_DRBREGL(client->doorbell_id);
int value;
base = kmap_atomic(i915_gem_object_get_page(client->client_obj, 0));
doorbell = base + client->doorbell_offset;
doorbell->db_status = 0;
doorbell = client->client_base + client->doorbell_offset;
kunmap_atomic(base);
doorbell->db_status = GUC_DOORBELL_DISABLED;
I915_WRITE(drbreg, I915_READ(drbreg) & ~GEN8_DRB_VALID);
......@@ -341,10 +327,8 @@ static void guc_init_proc_desc(struct intel_guc *guc,
struct i915_guc_client *client)
{
struct guc_process_desc *desc;
void *base;
base = kmap_atomic(i915_gem_object_get_page(client->client_obj, 0));
desc = base + client->proc_desc_offset;
desc = client->client_base + client->proc_desc_offset;
memset(desc, 0, sizeof(*desc));
......@@ -361,8 +345,6 @@ static void guc_init_proc_desc(struct intel_guc *guc,
desc->wq_size_bytes = client->wq_size;
desc->wq_status = WQ_STATUS_ACTIVE;
desc->priority = client->priority;
kunmap_atomic(base);
}
/*
......@@ -376,12 +358,14 @@ static void guc_init_proc_desc(struct intel_guc *guc,
static void guc_init_ctx_desc(struct intel_guc *guc,
struct i915_guc_client *client)
{
struct drm_i915_gem_object *client_obj = client->client_obj;
struct drm_i915_private *dev_priv = guc_to_i915(guc);
struct intel_engine_cs *engine;
struct intel_context *ctx = client->owner;
struct guc_context_desc desc;
struct sg_table *sg;
enum intel_engine_id id;
u32 gfx_addr;
memset(&desc, 0, sizeof(desc));
......@@ -410,16 +394,17 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
lrc->context_desc = (u32)ctx_desc;
/* The state page is after PPHWSP */
lrc->ring_lcra = i915_gem_obj_ggtt_offset(obj) +
LRC_STATE_PN * PAGE_SIZE;
gfx_addr = i915_gem_obj_ggtt_offset(obj);
lrc->ring_lcra = gfx_addr + LRC_STATE_PN * PAGE_SIZE;
lrc->context_id = (client->ctx_index << GUC_ELC_CTXID_OFFSET) |
(engine->guc_id << GUC_ELC_ENGINE_OFFSET);
obj = ctx->engine[id].ringbuf->obj;
gfx_addr = i915_gem_obj_ggtt_offset(obj);
lrc->ring_begin = i915_gem_obj_ggtt_offset(obj);
lrc->ring_end = lrc->ring_begin + obj->base.size - 1;
lrc->ring_next_free_location = lrc->ring_begin;
lrc->ring_begin = gfx_addr;
lrc->ring_end = gfx_addr + obj->base.size - 1;
lrc->ring_next_free_location = gfx_addr;
lrc->ring_current_tail_pointer_value = 0;
desc.engines_used |= (1 << engine->guc_id);
......@@ -428,22 +413,17 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
WARN_ON(desc.engines_used == 0);
/*
* The CPU address is only needed at certain points, so kmap_atomic on
* demand instead of storing it in the ctx descriptor.
* XXX: May make debug easier to have it mapped
* The doorbell, process descriptor, and workqueue are all parts
* of the client object, which the GuC will reference via the GGTT
*/
desc.db_trigger_cpu = 0;
desc.db_trigger_uk = client->doorbell_offset +
i915_gem_obj_ggtt_offset(client->client_obj);
desc.db_trigger_phy = client->doorbell_offset +
sg_dma_address(client->client_obj->pages->sgl);
desc.process_desc = client->proc_desc_offset +
i915_gem_obj_ggtt_offset(client->client_obj);
desc.wq_addr = client->wq_offset +
i915_gem_obj_ggtt_offset(client->client_obj);
gfx_addr = i915_gem_obj_ggtt_offset(client_obj);
desc.db_trigger_phy = sg_dma_address(client_obj->pages->sgl) +
client->doorbell_offset;
desc.db_trigger_cpu = (uintptr_t)client->client_base +
client->doorbell_offset;
desc.db_trigger_uk = gfx_addr + client->doorbell_offset;
desc.process_desc = gfx_addr + client->proc_desc_offset;
desc.wq_addr = gfx_addr + client->wq_offset;
desc.wq_size = client->wq_size;
/*
......@@ -474,25 +454,16 @@ static void guc_fini_ctx_desc(struct intel_guc *guc,
int i915_guc_wq_check_space(struct i915_guc_client *gc)
{
struct guc_process_desc *desc;
void *base;
u32 size = sizeof(struct guc_wq_item);
int ret = -ETIMEDOUT, timeout_counter = 200;
if (!gc)
return 0;
/* Quickly return if wq space is available since last time we cache the
* head position. */
if (CIRC_SPACE(gc->wq_tail, gc->wq_head, gc->wq_size) >= size)
return 0;
base = kmap_atomic(i915_gem_object_get_page(gc->client_obj, 0));
desc = base + gc->proc_desc_offset;
desc = gc->client_base + gc->proc_desc_offset;
while (timeout_counter-- > 0) {
gc->wq_head = desc->head;
if (CIRC_SPACE(gc->wq_tail, gc->wq_head, gc->wq_size) >= size) {
if (CIRC_SPACE(gc->wq_tail, desc->head, gc->wq_size) >= size) {
ret = 0;
break;
}
......@@ -501,19 +472,19 @@ int i915_guc_wq_check_space(struct i915_guc_client *gc)
usleep_range(1000, 2000);
};
kunmap_atomic(base);
return ret;
}
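For reference, the CIRC_SPACE() arithmetic used above and in guc_add_workqueue_item() below comes from <linux/circ_buf.h> and assumes a power-of-two size; a sketch of what the call expands to:
/* From <linux/circ_buf.h> (power-of-two size assumed):
 *   CIRC_SPACE(head, tail, size) = ((tail) - ((head) + 1)) & ((size) - 1)
 * so CIRC_SPACE(gc->wq_tail, desc->head, gc->wq_size) is the number of
 * bytes the driver (producer, gc->wq_tail) can write into the work queue
 * without overtaking the GuC (consumer, desc->head). */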
static int guc_add_workqueue_item(struct i915_guc_client *gc,
struct drm_i915_gem_request *rq)
{
struct guc_process_desc *desc;
struct guc_wq_item *wqi;
void *base;
u32 tail, wq_len, wq_off, space;
space = CIRC_SPACE(gc->wq_tail, gc->wq_head, gc->wq_size);
desc = gc->client_base + gc->proc_desc_offset;
space = CIRC_SPACE(gc->wq_tail, desc->head, gc->wq_size);
if (WARN_ON(space < sizeof(struct guc_wq_item)))
return -ENOSPC; /* shouldn't happen */
......@@ -661,21 +632,28 @@ static void guc_client_free(struct drm_device *dev,
if (!client)
return;
if (client->doorbell_id != GUC_INVALID_DOORBELL_ID) {
/*
* First disable the doorbell, then tell the GuC we've
* finished with it, finally deallocate it in our bitmap
*/
guc_disable_doorbell(guc, client);
host2guc_release_doorbell(guc, client);
release_doorbell(guc, client->doorbell_id);
}
/*
* XXX: wait for any outstanding submissions before freeing memory.
* Be sure to drop any locks
*/
if (client->client_base) {
/*
* If we got as far as setting up a doorbell, make sure
* we shut it down before unmapping & deallocating the
* memory. So first disable the doorbell, then tell the
* GuC that we've finished with it, finally deallocate
* it in our bitmap
*/
if (client->doorbell_id != GUC_INVALID_DOORBELL_ID) {
guc_disable_doorbell(guc, client);
host2guc_release_doorbell(guc, client);
release_doorbell(guc, client->doorbell_id);
}
kunmap(kmap_to_page(client->client_base));
}
gem_release_guc_obj(client->client_obj);
if (client->ctx_index != GUC_INVALID_CTX_ID) {
......@@ -696,7 +674,7 @@ static void guc_client_free(struct drm_device *dev,
* @ctx: the context that owns the client (we use the default render
* context)
*
* Return: An i915_guc_client object if success.
* Return: An i915_guc_client object if success, else NULL.
*/
static struct i915_guc_client *guc_client_alloc(struct drm_device *dev,
uint32_t priority,
......@@ -728,7 +706,9 @@ static struct i915_guc_client *guc_client_alloc(struct drm_device *dev,
if (!obj)
goto err;
/* We'll keep just the first (doorbell/proc) page permanently kmap'd. */
client->client_obj = obj;
client->client_base = kmap(i915_gem_object_get_page(obj, 0));
client->wq_offset = GUC_DB_SIZE;
client->wq_size = GUC_WQ_SIZE;
......
......@@ -79,6 +79,16 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
/* PCI config space */
#define MCHBAR_I915 0x44
#define MCHBAR_I965 0x48
#define MCHBAR_SIZE (4 * 4096)
#define DEVEN 0x54
#define DEVEN_MCHBAR_EN (1 << 28)
#define BSM 0x5c
#define BSM_MASK (0xFFFF << 20)
#define HPLLCC 0xc0 /* 85x only */
#define GC_CLOCK_CONTROL_MASK (0x7 << 0)
#define GC_CLOCK_133_200 (0 << 0)
......@@ -90,6 +100,16 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define GC_CLOCK_166_266 (6 << 0)
#define GC_CLOCK_166_250 (7 << 0)
#define I915_GDRST 0xc0 /* PCI config register */
#define GRDOM_FULL (0 << 2)
#define GRDOM_RENDER (1 << 2)
#define GRDOM_MEDIA (3 << 2)
#define GRDOM_MASK (3 << 2)
#define GRDOM_RESET_STATUS (1 << 1)
#define GRDOM_RESET_ENABLE (1 << 0)
#define GCDGMBUS 0xcc
#define GCFGC2 0xda
#define GCFGC 0xf0 /* 915+ only */
#define GC_LOW_FREQUENCY_ENABLE (1 << 7)
......@@ -121,18 +141,16 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define I915_GC_RENDER_CLOCK_166_MHZ (0 << 0)
#define I915_GC_RENDER_CLOCK_200_MHZ (1 << 0)
#define I915_GC_RENDER_CLOCK_333_MHZ (4 << 0)
#define GCDGMBUS 0xcc
#define PCI_LBPC 0xf4 /* legacy/combination backlight modes, also called LBB */
#define ASLE 0xe4
#define ASLS 0xfc
#define SWSCI 0xe8
#define SWSCI_SCISEL (1 << 15)
#define SWSCI_GSSCIE (1 << 0)
#define LBPC 0xf4 /* legacy/combination backlight modes, also called LBB */
/* Graphics reset regs */
#define I915_GDRST 0xc0 /* PCI config register */
#define GRDOM_FULL (0<<2)
#define GRDOM_RENDER (1<<2)
#define GRDOM_MEDIA (3<<2)
#define GRDOM_MASK (3<<2)
#define GRDOM_RESET_STATUS (1<<1)
#define GRDOM_RESET_ENABLE (1<<0)
#define ILK_GDSR _MMIO(MCHBAR_MIRROR_BASE + 0x2ca4)
#define ILK_GRDOM_FULL (0<<1)
......@@ -1375,14 +1393,10 @@ enum skl_disp_power_wells {
#define _PORT_REF_DW6_A 0x162198
#define _PORT_REF_DW6_BC 0x6C198
/*
* FIXME: BSpec/CHV ConfigDB disagrees on the following two fields, fix them
* after testing.
*/
#define GRC_CODE_SHIFT 23
#define GRC_CODE_MASK (0x1FF << GRC_CODE_SHIFT)
#define GRC_CODE_SHIFT 24
#define GRC_CODE_MASK (0xFF << GRC_CODE_SHIFT)
#define GRC_CODE_FAST_SHIFT 16
#define GRC_CODE_FAST_MASK (0x7F << GRC_CODE_FAST_SHIFT)
#define GRC_CODE_FAST_MASK (0xFF << GRC_CODE_FAST_SHIFT)
#define GRC_CODE_SLOW_SHIFT 8
#define GRC_CODE_SLOW_MASK (0xFF << GRC_CODE_SLOW_SHIFT)
#define GRC_CODE_NOM_MASK 0xFF
......@@ -2934,7 +2948,14 @@ enum skl_disp_power_wells {
#define GEN6_RP_STATE_CAP _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5998)
#define BXT_RP_STATE_CAP _MMIO(0x138170)
#define INTERVAL_1_28_US(us) (((us) * 100) >> 7)
/*
* Make these a multiple of magic 25 to avoid SNB (eg. Dell XPS
* 8300) freezing up around GPU hangs. Looks as if even
* scheduling/timer interrupts start misbehaving if the RPS
* EI/thresholds are "bad", leading to a very sluggish or even
* frozen machine.
*/
#define INTERVAL_1_28_US(us) roundup(((us) * 100) >> 7, 25)
#define INTERVAL_1_33_US(us) (((us) * 3) >> 2)
#define INTERVAL_0_833_US(us) (((us) * 6) / 5)
#define GT_INTERVAL_FROM_US(dev_priv, us) (IS_GEN9(dev_priv) ? \
......@@ -2943,6 +2964,15 @@ enum skl_disp_power_wells {
INTERVAL_1_33_US(us)) : \
INTERVAL_1_28_US(us))
#define INTERVAL_1_28_TO_US(interval) (((interval) << 7) / 100)
#define INTERVAL_1_33_TO_US(interval) (((interval) << 2) / 3)
#define INTERVAL_0_833_TO_US(interval) (((interval) * 5) / 6)
#define GT_PM_INTERVAL_TO_US(dev_priv, interval) (IS_GEN9(dev_priv) ? \
(IS_BROXTON(dev_priv) ? \
INTERVAL_0_833_TO_US(interval) : \
INTERVAL_1_33_TO_US(interval)) : \
INTERVAL_1_28_TO_US(interval))
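A worked example of the round-trip (illustrative numbers): on SNB..BDW the units are 1.28us and, per the comment above, encoded values are forced to a multiple of 25.
/* Illustrative round-trip in 1.28us units (SNB..BDW path):
 *   encode: INTERVAL_1_28_US(2500)    = roundup((2500 * 100) >> 7, 25)
 *                                     = roundup(1953, 25) = 1975
 *   decode: INTERVAL_1_28_TO_US(1975) = (1975 << 7) / 100 = 2528
 * The small drift is why i915_frequency_info now prints the raw register
 * value alongside the decoded microseconds. */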
/*
* Logical Context regs
*/
......@@ -6866,6 +6896,8 @@ enum skl_disp_power_wells {
#define VLV_SPAREG2H _MMIO(0xA194)
#define GTFIFODBG _MMIO(0x120000)
#define GT_FIFO_SBDEDICATE_FREE_ENTRY_CHV (0x1f << 20)
#define GT_FIFO_FREE_ENTRIES_CHV (0x7f << 13)
#define GT_FIFO_SBDROPERR (1<<6)
#define GT_FIFO_BLOBDROPERR (1<<5)
#define GT_FIFO_SB_READ_ABORTERR (1<<4)
......@@ -6882,8 +6914,11 @@ enum skl_disp_power_wells {
#define HSW_IDICR _MMIO(0x9008)
#define IDIHASHMSK(x) (((x) & 0x3f) << 16)
#define HSW_EDRAM_PRESENT _MMIO(0x120010)
#define HSW_EDRAM_CAP _MMIO(0x120010)
#define EDRAM_ENABLED 0x1
#define EDRAM_NUM_BANKS(cap) (((cap) >> 1) & 0xf)
#define EDRAM_WAYS_IDX(cap) (((cap) >> 5) & 0x7)
#define EDRAM_SETS_IDX(cap) (((cap) >> 8) & 0x3)
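These capability fields feed the new intel_uncore_edram_size() used in the i915_llc hunk above; a minimal decode sketch, where the ways/sets lookup tables are illustrative assumptions rather than values taken from this diff:
/* Sketch only: decode HSW_EDRAM_CAP into a size in bytes on gen9+.
 * The ways[]/sets[] tables below are assumptions for illustration. */
static u64 example_edram_size(u32 cap)
{
	static const unsigned int ways[8] = { 4, 8, 12, 16, 16, 16, 16, 16 };
	static const unsigned int sets[4] = { 1, 1, 2, 2 };

	if (!(cap & EDRAM_ENABLED))
		return 0;

	return (u64)EDRAM_NUM_BANKS(cap) * ways[EDRAM_WAYS_IDX(cap)] *
	       sets[EDRAM_SETS_IDX(cap)] * 1024 * 1024;
}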
#define GEN6_UCGCTL1 _MMIO(0x9400)
# define GEN6_EU_TCUNIT_CLOCK_GATE_DISABLE (1 << 16)
......@@ -7161,6 +7196,7 @@ enum skl_disp_power_wells {
#define GEN9_HALF_SLICE_CHICKEN7 _MMIO(0xe194)
#define GEN9_ENABLE_YV12_BUGFIX (1<<4)
#define GEN9_ENABLE_GPGPU_PREEMPTION (1<<2)
/* Audio */
#define G4X_AUD_VID_DID _MMIO(dev_priv->info.display_mmio_offset + 0x62020)
......
......@@ -58,8 +58,6 @@
#define SLAVE_ADDR1 0x70
#define SLAVE_ADDR2 0x72
static int panel_type;
/* Get BDB block size given a pointer to Block ID. */
static u32 _get_blocksize(const u8 *block_base)
{
......@@ -205,17 +203,32 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
const struct lvds_dvo_timing *panel_dvo_timing;
const struct lvds_fp_timing *fp_timing;
struct drm_display_mode *panel_fixed_mode;
int panel_type;
int drrs_mode;
int ret;
lvds_options = find_section(bdb, BDB_LVDS_OPTIONS);
if (!lvds_options)
return;
dev_priv->vbt.lvds_dither = lvds_options->pixel_dither;
if (lvds_options->panel_type == 0xff)
return;
panel_type = lvds_options->panel_type;
ret = intel_opregion_get_panel_type(dev_priv->dev);
if (ret >= 0) {
WARN_ON(ret > 0xf);
panel_type = ret;
DRM_DEBUG_KMS("Panel type: %d (OpRegion)\n", panel_type);
} else {
if (lvds_options->panel_type > 0xf) {
DRM_DEBUG_KMS("Invalid VBT panel type 0x%x\n",
lvds_options->panel_type);
return;
}
panel_type = lvds_options->panel_type;
DRM_DEBUG_KMS("Panel type: %d (VBT)\n", panel_type);
}
dev_priv->vbt.panel_type = panel_type;
drrs_mode = (lvds_options->dps_panel_type_bits
>> (panel_type * 2)) & MODE_MASK;
......@@ -251,7 +264,7 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
panel_dvo_timing = get_lvds_dvo_timing(lvds_lfp_data,
lvds_lfp_data_ptrs,
lvds_options->panel_type);
panel_type);
panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode), GFP_KERNEL);
if (!panel_fixed_mode)
......@@ -266,7 +279,7 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
fp_timing = get_lvds_fp_timing(bdb, lvds_lfp_data,
lvds_lfp_data_ptrs,
lvds_options->panel_type);
panel_type);
if (fp_timing) {
/* check the resolution, just to be sure */
if (fp_timing->x_res == panel_fixed_mode->hdisplay &&
......@@ -284,6 +297,7 @@ parse_lfp_backlight(struct drm_i915_private *dev_priv,
{
const struct bdb_lfp_backlight_data *backlight_data;
const struct bdb_lfp_backlight_data_entry *entry;
int panel_type = dev_priv->vbt.panel_type;
backlight_data = find_section(bdb, BDB_LVDS_BACKLIGHT);
if (!backlight_data)
......@@ -546,6 +560,7 @@ parse_edp(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
const struct bdb_edp *edp;
const struct edp_power_seq *edp_pps;
const struct edp_link_params *edp_link_params;
int panel_type = dev_priv->vbt.panel_type;
edp = find_section(bdb, BDB_EDP);
if (!edp) {
......@@ -657,6 +672,7 @@ parse_psr(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
{
const struct bdb_psr *psr;
const struct psr_table *psr_table;
int panel_type = dev_priv->vbt.panel_type;
psr = find_section(bdb, BDB_PSR);
if (!psr) {
......@@ -703,6 +719,7 @@ parse_mipi_config(struct drm_i915_private *dev_priv,
const struct bdb_mipi_config *start;
const struct mipi_config *config;
const struct mipi_pps_data *pps;
int panel_type = dev_priv->vbt.panel_type;
/* parse MIPI blocks only if LFP type is MIPI */
if (!intel_bios_is_dsi_present(dev_priv, NULL))
......@@ -910,6 +927,7 @@ static void
parse_mipi_sequence(struct drm_i915_private *dev_priv,
const struct bdb_header *bdb)
{
int panel_type = dev_priv->vbt.panel_type;
const struct bdb_mipi_sequence *sequence;
const u8 *seq_data;
u32 seq_size;
......
......@@ -50,6 +50,7 @@ MODULE_FIRMWARE(I915_CSR_SKL);
MODULE_FIRMWARE(I915_CSR_BXT);
#define SKL_CSR_VERSION_REQUIRED CSR_VERSION(1, 23)
#define BXT_CSR_VERSION_REQUIRED CSR_VERSION(1, 7)
#define CSR_MAX_FW_SIZE 0x2FFF
#define CSR_DEFAULT_FW_OFFSET 0xFFFFFFFF
......@@ -281,6 +282,7 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
uint32_t dmc_offset = CSR_DEFAULT_FW_OFFSET, readcount = 0, nbytes;
uint32_t i;
uint32_t *dmc_payload;
uint32_t required_min_version;
if (!fw)
return NULL;
......@@ -296,15 +298,23 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
csr->version = css_header->version;
if ((IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) &&
csr->version < SKL_CSR_VERSION_REQUIRED) {
DRM_INFO("Refusing to load old Skylake DMC firmware v%u.%u,"
if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
required_min_version = SKL_CSR_VERSION_REQUIRED;
} else if (IS_BROXTON(dev_priv)) {
required_min_version = BXT_CSR_VERSION_REQUIRED;
} else {
MISSING_CASE(INTEL_REVID(dev_priv));
required_min_version = 0;
}
if (csr->version < required_min_version) {
DRM_INFO("Refusing to load old DMC firmware v%u.%u,"
" please upgrade to v%u.%u or later"
" [" FIRMWARE_URL "].\n",
CSR_VERSION_MAJOR(csr->version),
CSR_VERSION_MINOR(csr->version),
CSR_VERSION_MAJOR(SKL_CSR_VERSION_REQUIRED),
CSR_VERSION_MINOR(SKL_CSR_VERSION_REQUIRED));
CSR_VERSION_MAJOR(required_min_version),
CSR_VERSION_MINOR(required_min_version));
return NULL;
}
......@@ -456,11 +466,51 @@ void intel_csr_ucode_init(struct drm_i915_private *dev_priv)
schedule_work(&dev_priv->csr.work);
}
/**
* intel_csr_ucode_suspend() - prepare CSR firmware before system suspend
* @dev_priv: i915 drm device
*
* Prepare the DMC firmware before entering system suspend. This includes
* flushing pending work items and releasing any resources acquired during
* init.
*/
void intel_csr_ucode_suspend(struct drm_i915_private *dev_priv)
{
if (!HAS_CSR(dev_priv))
return;
flush_work(&dev_priv->csr.work);
/* Drop the reference held in case DMC isn't loaded. */
if (!dev_priv->csr.dmc_payload)
intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
}
/**
* intel_csr_ucode_resume() - init CSR firmware during system resume
* @dev_priv: i915 drm device
*
* Reinitialize the DMC firmware during system resume, reacquiring any
* resources released in intel_csr_ucode_suspend().
*/
void intel_csr_ucode_resume(struct drm_i915_private *dev_priv)
{
if (!HAS_CSR(dev_priv))
return;
/*
* Reacquire the reference to keep RPM disabled in case DMC isn't
* loaded.
*/
if (!dev_priv->csr.dmc_payload)
intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
}
/**
* intel_csr_ucode_fini() - unload the CSR firmware.
* @dev_priv: i915 drm device.
*
* Firmmware unloading includes freeing the internal momory and reset the
* Firmmware unloading includes freeing the internal memory and reset the
* firmware loading status.
*/
void intel_csr_ucode_fini(struct drm_i915_private *dev_priv)
......@@ -468,7 +518,7 @@ void intel_csr_ucode_fini(struct drm_i915_private *dev_priv)
if (!HAS_CSR(dev_priv))
return;
flush_work(&dev_priv->csr.work);
intel_csr_ucode_suspend(dev_priv);
kfree(dev_priv->csr.dmc_payload);
}
......@@ -443,9 +443,17 @@ void intel_prepare_ddi_buffer(struct intel_encoder *encoder)
} else if (IS_BROADWELL(dev_priv)) {
ddi_translations_fdi = bdw_ddi_translations_fdi;
ddi_translations_dp = bdw_ddi_translations_dp;
ddi_translations_edp = bdw_ddi_translations_edp;
if (dev_priv->vbt.edp.low_vswing) {
ddi_translations_edp = bdw_ddi_translations_edp;
n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_edp);
} else {
ddi_translations_edp = bdw_ddi_translations_dp;
n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_dp);
}
ddi_translations_hdmi = bdw_ddi_translations_hdmi;
n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_edp);
n_dp_entries = ARRAY_SIZE(bdw_ddi_translations_dp);
n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi);
hdmi_default_entry = 7;
......@@ -1722,12 +1730,78 @@ static void intel_disable_ddi(struct intel_encoder *intel_encoder)
}
}
static bool broxton_phy_is_enabled(struct drm_i915_private *dev_priv,
enum dpio_phy phy)
{
if (!(I915_READ(BXT_P_CR_GT_DISP_PWRON) & GT_DISPLAY_POWER_ON(phy)))
return false;
if ((I915_READ(BXT_PORT_CL1CM_DW0(phy)) &
(PHY_POWER_GOOD | PHY_RESERVED)) != PHY_POWER_GOOD) {
DRM_DEBUG_DRIVER("DDI PHY %d powered, but power hasn't settled\n",
phy);
return false;
}
if (phy == DPIO_PHY1 &&
!(I915_READ(BXT_PORT_REF_DW3(DPIO_PHY1)) & GRC_DONE)) {
DRM_DEBUG_DRIVER("DDI PHY 1 powered, but GRC isn't done\n");
return false;
}
if (!(I915_READ(BXT_PHY_CTL_FAMILY(phy)) & COMMON_RESET_DIS)) {
DRM_DEBUG_DRIVER("DDI PHY %d powered, but still in reset\n",
phy);
return false;
}
return true;
}
static u32 broxton_get_grc(struct drm_i915_private *dev_priv, enum dpio_phy phy)
{
u32 val = I915_READ(BXT_PORT_REF_DW6(phy));
return (val & GRC_CODE_MASK) >> GRC_CODE_SHIFT;
}
static void broxton_phy_wait_grc_done(struct drm_i915_private *dev_priv,
enum dpio_phy phy)
{
if (wait_for(I915_READ(BXT_PORT_REF_DW3(phy)) & GRC_DONE, 10))
DRM_ERROR("timeout waiting for PHY%d GRC\n", phy);
}
static bool broxton_phy_verify_state(struct drm_i915_private *dev_priv,
enum dpio_phy phy);
static void broxton_phy_init(struct drm_i915_private *dev_priv,
enum dpio_phy phy)
{
enum port port;
u32 ports, val;
if (broxton_phy_is_enabled(dev_priv, phy)) {
/* Still read out the GRC value for state verification */
if (phy == DPIO_PHY0)
dev_priv->bxt_phy_grc = broxton_get_grc(dev_priv, phy);
if (broxton_phy_verify_state(dev_priv, phy)) {
DRM_DEBUG_DRIVER("DDI PHY %d already enabled, "
"won't reprogram it\n", phy);
return;
}
DRM_DEBUG_DRIVER("DDI PHY %d enabled with invalid state, "
"force reprogramming it\n", phy);
} else {
DRM_DEBUG_DRIVER("DDI PHY %d not enabled, enabling it\n", phy);
}
val = I915_READ(BXT_P_CR_GT_DISP_PWRON);
val |= GT_DISPLAY_POWER_ON(phy);
I915_WRITE(BXT_P_CR_GT_DISP_PWRON, val);
......@@ -1798,6 +1872,9 @@ static void broxton_phy_init(struct drm_i915_private *dev_priv,
* enabled.
* TODO: port C is only connected on BXT-P, so on BXT0/1 we should
* power down the second channel on PHY0 as well.
*
* FIXME: Clarify programming of the following, the register is
* read-only with bit 6 fixed at 0 at least in stepping A.
*/
if (phy == DPIO_PHY1)
val |= OCL2_LDOFUSE_PWR_DIS;
......@@ -1810,12 +1887,10 @@ static void broxton_phy_init(struct drm_i915_private *dev_priv,
* the corresponding calibrated value from PHY1, and disable
* the automatic calibration on PHY0.
*/
if (wait_for(I915_READ(BXT_PORT_REF_DW3(DPIO_PHY1)) & GRC_DONE,
10))
DRM_ERROR("timeout waiting for PHY1 GRC\n");
broxton_phy_wait_grc_done(dev_priv, DPIO_PHY1);
val = I915_READ(BXT_PORT_REF_DW6(DPIO_PHY1));
val = (val & GRC_CODE_MASK) >> GRC_CODE_SHIFT;
val = dev_priv->bxt_phy_grc = broxton_get_grc(dev_priv,
DPIO_PHY1);
grc_code = val << GRC_CODE_FAST_SHIFT |
val << GRC_CODE_SLOW_SHIFT |
val;
......@@ -1825,17 +1900,27 @@ static void broxton_phy_init(struct drm_i915_private *dev_priv,
val |= GRC_DIS | GRC_RDY_OVRD;
I915_WRITE(BXT_PORT_REF_DW8(DPIO_PHY0), val);
}
/*
* During PHY1 init delay waiting for GRC calibration to finish, since
* it can happen in parallel with the subsequent PHY0 init.
*/
val = I915_READ(BXT_PHY_CTL_FAMILY(phy));
val |= COMMON_RESET_DIS;
I915_WRITE(BXT_PHY_CTL_FAMILY(phy), val);
}
void broxton_ddi_phy_init(struct drm_device *dev)
void broxton_ddi_phy_init(struct drm_i915_private *dev_priv)
{
/* Enable PHY1 first since it provides Rcomp for PHY0 */
broxton_phy_init(dev->dev_private, DPIO_PHY1);
broxton_phy_init(dev->dev_private, DPIO_PHY0);
broxton_phy_init(dev_priv, DPIO_PHY1);
broxton_phy_init(dev_priv, DPIO_PHY0);
/*
* If BIOS enabled only PHY0 and not PHY1, we skipped waiting for the
* PHY1 GRC calibration to finish, so wait for it here.
*/
broxton_phy_wait_grc_done(dev_priv, DPIO_PHY1);
}
static void broxton_phy_uninit(struct drm_i915_private *dev_priv,
......@@ -1846,17 +1931,126 @@ static void broxton_phy_uninit(struct drm_i915_private *dev_priv,
val = I915_READ(BXT_PHY_CTL_FAMILY(phy));
val &= ~COMMON_RESET_DIS;
I915_WRITE(BXT_PHY_CTL_FAMILY(phy), val);
val = I915_READ(BXT_P_CR_GT_DISP_PWRON);
val &= ~GT_DISPLAY_POWER_ON(phy);
I915_WRITE(BXT_P_CR_GT_DISP_PWRON, val);
}
void broxton_ddi_phy_uninit(struct drm_device *dev)
void broxton_ddi_phy_uninit(struct drm_i915_private *dev_priv)
{
struct drm_i915_private *dev_priv = dev->dev_private;
broxton_phy_uninit(dev_priv, DPIO_PHY1);
broxton_phy_uninit(dev_priv, DPIO_PHY0);
}
static bool __printf(6, 7)
__phy_reg_verify_state(struct drm_i915_private *dev_priv, enum dpio_phy phy,
i915_reg_t reg, u32 mask, u32 expected,
const char *reg_fmt, ...)
{
struct va_format vaf;
va_list args;
u32 val;
val = I915_READ(reg);
if ((val & mask) == expected)
return true;
va_start(args, reg_fmt);
vaf.fmt = reg_fmt;
vaf.va = &args;
/* FIXME: do this in broxton_phy_uninit per phy */
I915_WRITE(BXT_P_CR_GT_DISP_PWRON, 0);
DRM_DEBUG_DRIVER("DDI PHY %d reg %pV [%08x] state mismatch: "
"current %08x, expected %08x (mask %08x)\n",
phy, &vaf, reg.reg, val, (val & ~mask) | expected,
mask);
va_end(args);
return false;
}
static bool broxton_phy_verify_state(struct drm_i915_private *dev_priv,
enum dpio_phy phy)
{
enum port port;
u32 ports;
uint32_t mask;
bool ok;
#define _CHK(reg, mask, exp, fmt, ...) \
__phy_reg_verify_state(dev_priv, phy, reg, mask, exp, fmt, \
## __VA_ARGS__)
/* We expect the PHY to be always enabled */
if (!broxton_phy_is_enabled(dev_priv, phy))
return false;
ok = true;
if (phy == DPIO_PHY0)
ports = BIT(PORT_B) | BIT(PORT_C);
else
ports = BIT(PORT_A);
for_each_port_masked(port, ports) {
int lane;
for (lane = 0; lane < 4; lane++)
ok &= _CHK(BXT_PORT_TX_DW14_LN(port, lane),
LATENCY_OPTIM,
lane != 1 ? LATENCY_OPTIM : 0,
"BXT_PORT_TX_DW14_LN(%d, %d)", port, lane);
}
/* PLL Rcomp code offset */
ok &= _CHK(BXT_PORT_CL1CM_DW9(phy),
IREF0RC_OFFSET_MASK, 0xe4 << IREF0RC_OFFSET_SHIFT,
"BXT_PORT_CL1CM_DW9(%d)", phy);
ok &= _CHK(BXT_PORT_CL1CM_DW10(phy),
IREF1RC_OFFSET_MASK, 0xe4 << IREF1RC_OFFSET_SHIFT,
"BXT_PORT_CL1CM_DW10(%d)", phy);
/* Power gating */
mask = OCL1_POWER_DOWN_EN | DW28_OLDO_DYN_PWR_DOWN_EN | SUS_CLK_CONFIG;
ok &= _CHK(BXT_PORT_CL1CM_DW28(phy), mask, mask,
"BXT_PORT_CL1CM_DW28(%d)", phy);
if (phy == DPIO_PHY0)
ok &= _CHK(BXT_PORT_CL2CM_DW6_BC,
DW6_OLDO_DYN_PWR_DOWN_EN, DW6_OLDO_DYN_PWR_DOWN_EN,
"BXT_PORT_CL2CM_DW6_BC");
/*
* TODO: Verify BXT_PORT_CL1CM_DW30 bit OCL2_LDOFUSE_PWR_DIS,
* at least on stepping A this bit is read-only and fixed at 0.
*/
if (phy == DPIO_PHY0) {
u32 grc_code = dev_priv->bxt_phy_grc;
grc_code = grc_code << GRC_CODE_FAST_SHIFT |
grc_code << GRC_CODE_SLOW_SHIFT |
grc_code;
mask = GRC_CODE_FAST_MASK | GRC_CODE_SLOW_MASK |
GRC_CODE_NOM_MASK;
ok &= _CHK(BXT_PORT_REF_DW6(DPIO_PHY0), mask, grc_code,
"BXT_PORT_REF_DW6(%d)", DPIO_PHY0);
mask = GRC_DIS | GRC_RDY_OVRD;
ok &= _CHK(BXT_PORT_REF_DW8(DPIO_PHY0), mask, mask,
"BXT_PORT_REF_DW8(%d)", DPIO_PHY0);
}
return ok;
#undef _CHK
}
void broxton_ddi_phy_verify_state(struct drm_i915_private *dev_priv)
{
if (!broxton_phy_verify_state(dev_priv, DPIO_PHY0) ||
!broxton_phy_verify_state(dev_priv, DPIO_PHY1))
i915_report_error(dev_priv, "DDI PHY state mismatch\n");
}
void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp)
......@@ -2044,12 +2238,6 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
intel_ddi_clock_get(encoder, pipe_config);
}
static void intel_ddi_destroy(struct drm_encoder *encoder)
{
/* HDMI has nothing special to destroy, so we can go with this. */
intel_dp_encoder_destroy(encoder);
}
static bool intel_ddi_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
......@@ -2068,7 +2256,8 @@ static bool intel_ddi_compute_config(struct intel_encoder *encoder,
}
static const struct drm_encoder_funcs intel_ddi_funcs = {
.destroy = intel_ddi_destroy,
.reset = intel_dp_encoder_reset,
.destroy = intel_dp_encoder_destroy,
};
static struct intel_connector *
......@@ -2167,6 +2356,7 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
intel_encoder->post_disable = intel_ddi_post_disable;
intel_encoder->get_hw_state = intel_ddi_get_hw_state;
intel_encoder->get_config = intel_ddi_get_config;
intel_encoder->suspend = intel_dp_encoder_suspend;
intel_dig_port->port = port;
intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) &
......
......@@ -2215,6 +2215,15 @@ static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
POSTING_READ(DP_A);
udelay(500);
/*
* [DevILK] Work around required when enabling DP PLL
* while a pipe is enabled going to FDI:
* 1. Wait for the start of vertical blank on the enabled pipe going to FDI
* 2. Program DP PLL enable
*/
if (IS_GEN5(dev_priv))
intel_wait_for_vblank_if_active(dev_priv->dev, !crtc->pipe);
intel_dp->DP |= DP_PLL_ENABLE;
I915_WRITE(DP_A, intel_dp->DP);
......@@ -2630,7 +2639,6 @@ static void intel_enable_dp(struct intel_encoder *encoder)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
uint32_t dp_reg = I915_READ(intel_dp->output_reg);
enum port port = dp_to_dig_port(intel_dp)->port;
enum pipe pipe = crtc->pipe;
if (WARN_ON(dp_reg & DP_PORT_EN))
......@@ -2641,35 +2649,12 @@ static void intel_enable_dp(struct intel_encoder *encoder)
if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
vlv_init_panel_power_sequencer(intel_dp);
/*
* We get an occasional spurious underrun between the port
* enable and vdd enable, when enabling port A eDP.
*
* FIXME: Not sure if this applies to (PCH) port D eDP as well
*/
if (port == PORT_A)
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
intel_dp_enable_port(intel_dp);
if (port == PORT_A && IS_GEN5(dev_priv)) {
/*
* Underrun reporting for the other pipe was disabled in
* g4x_pre_enable_dp(). The eDP PLL and port have now been
* enabled, so it's now safe to re-enable underrun reporting.
*/
intel_wait_for_vblank_if_active(dev_priv->dev, !pipe);
intel_set_cpu_fifo_underrun_reporting(dev_priv, !pipe, true);
intel_set_pch_fifo_underrun_reporting(dev_priv, !pipe, true);
}
edp_panel_vdd_on(intel_dp);
edp_panel_on(intel_dp);
edp_panel_vdd_off(intel_dp, true);
if (port == PORT_A)
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
pps_unlock(intel_dp);
if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
......@@ -2711,26 +2696,11 @@ static void vlv_enable_dp(struct intel_encoder *encoder)
static void g4x_pre_enable_dp(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
enum port port = dp_to_dig_port(intel_dp)->port;
enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;
intel_dp_prepare(encoder);
if (port == PORT_A && IS_GEN5(dev_priv)) {
/*
* We get FIFO underruns on the other pipe when
* enabling the CPU eDP PLL, and when enabling CPU
* eDP port. We could potentially avoid the PLL
* underrun with a vblank wait just prior to enabling
* the PLL, but that doesn't appear to help the port
* enable case. Just sweep it all under the rug.
*/
intel_set_cpu_fifo_underrun_reporting(dev_priv, !pipe, false);
intel_set_pch_fifo_underrun_reporting(dev_priv, !pipe, false);
}
/* Only ilk+ has port A */
if (port == PORT_A)
ironlake_edp_pll_on(intel_dp);
......@@ -3806,7 +3776,7 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
* downstream port information. So, an early return here saves
* time from performing other operations which are not required.
*/
if (!intel_dp->sink_count)
if (!is_edp(intel_dp) && !intel_dp->sink_count)
return false;
/* Check if the panel supports PSR */
......@@ -4339,6 +4309,9 @@ intel_dp_detect_dpcd(struct intel_dp *intel_dp)
if (!intel_dp_get_dpcd(intel_dp))
return connector_status_disconnected;
if (is_edp(intel_dp))
return connector_status_connected;
/* if there's no downstream port, we're done */
if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
return connector_status_connected;
......@@ -4608,6 +4581,15 @@ intel_dp_long_pulse(struct intel_connector *intel_connector)
intel_dp->compliance_test_type = 0;
intel_dp->compliance_test_data = 0;
if (intel_dp->is_mst) {
DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n",
intel_dp->is_mst,
intel_dp->mst_mgr.mst_state);
intel_dp->is_mst = false;
drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
intel_dp->is_mst);
}
goto out;
}
......@@ -4665,20 +4647,9 @@ intel_dp_long_pulse(struct intel_connector *intel_connector)
}
out:
if (status != connector_status_connected) {
if ((status != connector_status_connected) &&
(intel_dp->is_mst == false))
intel_dp_unset_edid(intel_dp);
/*
* If we were in MST mode, and device is not there,
* get out of MST mode
*/
if (intel_dp->is_mst) {
DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n",
intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
intel_dp->is_mst = false;
drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
intel_dp->is_mst);
}
}
intel_display_power_put(to_i915(dev), power_domain);
return;
......@@ -4851,6 +4822,11 @@ intel_dp_set_property(struct drm_connector *connector,
DRM_DEBUG_KMS("no scaling not supported\n");
return -EINVAL;
}
if (HAS_GMCH_DISPLAY(dev_priv) &&
val == DRM_MODE_SCALE_CENTER) {
DRM_DEBUG_KMS("centering not supported\n");
return -EINVAL;
}
if (intel_connector->panel.fitting_mode == val) {
/* the eDP scaling property is not changed */
......@@ -4914,7 +4890,7 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder)
kfree(intel_dig_port);
}
static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
......@@ -4956,7 +4932,7 @@ static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
edp_panel_vdd_schedule_off(intel_dp);
}
static void intel_dp_encoder_reset(struct drm_encoder *encoder)
void intel_dp_encoder_reset(struct drm_encoder *encoder)
{
struct intel_dp *intel_dp;
......
......@@ -1295,17 +1295,9 @@ static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
uint32_t temp;
enum port port = (enum port)pll->id; /* 1:1 port->PLL mapping */
temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
/*
* Definition of each bit polarity has been changed
* after A1 stepping
*/
if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
temp &= ~PORT_PLL_REF_SEL;
else
temp |= PORT_PLL_REF_SEL;
/* Non-SSC reference */
temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
temp |= PORT_PLL_REF_SEL;
I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
/* Disable 10 bit clock */
......@@ -1652,10 +1644,7 @@ static void intel_ddi_pll_init(struct drm_device *dev)
DRM_DEBUG_KMS("Sanitized cdclk programmed by pre-os\n");
if (!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_ENABLE))
DRM_ERROR("LCPLL1 is disabled\n");
} else if (IS_BROXTON(dev)) {
broxton_init_cdclk(dev);
broxton_ddi_phy_init(dev);
} else {
} else if (!IS_BROXTON(dev_priv)) {
/*
* The LCPLL register should be turned on by the BIOS. For now
* let's just check its state and print errors in case
......
......@@ -497,6 +497,11 @@ struct intel_crtc_state {
/* Actual register state of the dpll, for shared dpll cross-checking. */
struct intel_dpll_hw_state dpll_hw_state;
/* DSI PLL registers */
struct {
u32 ctrl, div;
} dsi_pll;
int pipe_bpp;
struct intel_link_m_n dp_m_n;
......@@ -1224,12 +1229,16 @@ void intel_prepare_reset(struct drm_device *dev);
void intel_finish_reset(struct drm_device *dev);
void hsw_enable_pc8(struct drm_i915_private *dev_priv);
void hsw_disable_pc8(struct drm_i915_private *dev_priv);
void broxton_init_cdclk(struct drm_device *dev);
void broxton_uninit_cdclk(struct drm_device *dev);
void broxton_ddi_phy_init(struct drm_device *dev);
void broxton_ddi_phy_uninit(struct drm_device *dev);
void broxton_init_cdclk(struct drm_i915_private *dev_priv);
void broxton_uninit_cdclk(struct drm_i915_private *dev_priv);
bool broxton_cdclk_verify_state(struct drm_i915_private *dev_priv);
void broxton_ddi_phy_init(struct drm_i915_private *dev_priv);
void broxton_ddi_phy_uninit(struct drm_i915_private *dev_priv);
void broxton_ddi_phy_verify_state(struct drm_i915_private *dev_priv);
void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv);
void bxt_enable_dc9(struct drm_i915_private *dev_priv);
void bxt_disable_dc9(struct drm_i915_private *dev_priv);
void gen9_enable_dc5(struct drm_i915_private *dev_priv);
void skl_init_cdclk(struct drm_i915_private *dev_priv);
int skl_sanitize_cdclk(struct drm_i915_private *dev_priv);
void skl_uninit_cdclk(struct drm_i915_private *dev_priv);
......@@ -1268,6 +1277,8 @@ u32 skl_plane_ctl_rotation(unsigned int rotation);
void intel_csr_ucode_init(struct drm_i915_private *);
void intel_csr_load_program(struct drm_i915_private *);
void intel_csr_ucode_fini(struct drm_i915_private *);
void intel_csr_ucode_suspend(struct drm_i915_private *);
void intel_csr_ucode_resume(struct drm_i915_private *);
/* intel_dp.c */
void intel_dp_init(struct drm_device *dev, i915_reg_t output_reg, enum port port);
......@@ -1278,6 +1289,8 @@ void intel_dp_set_link_params(struct intel_dp *intel_dp,
void intel_dp_start_link_train(struct intel_dp *intel_dp);
void intel_dp_stop_link_train(struct intel_dp *intel_dp);
void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
void intel_dp_encoder_reset(struct drm_encoder *encoder);
void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder);
void intel_dp_encoder_destroy(struct drm_encoder *encoder);
int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc);
bool intel_dp_compute_config(struct intel_encoder *encoder,
......@@ -1462,8 +1475,8 @@ int intel_power_domains_init(struct drm_i915_private *);
void intel_power_domains_fini(struct drm_i915_private *);
void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume);
void intel_power_domains_suspend(struct drm_i915_private *dev_priv);
void skl_pw1_misc_io_init(struct drm_i915_private *dev_priv);
void skl_pw1_misc_io_fini(struct drm_i915_private *dev_priv);
void bxt_display_core_init(struct drm_i915_private *dev_priv, bool resume);
void bxt_display_core_uninit(struct drm_i915_private *dev_priv);
void intel_runtime_pm_enable(struct drm_i915_private *dev_priv);
const char *
intel_display_power_domain_str(enum intel_display_power_domain domain);
......
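The new bxt_display_core_*() and broxton_*_verify_state() prototypes in the intel_drv.h hunk above pair up during system suspend/resume roughly as follows. This is a hypothetical condensation; the real call sites in i915_drv.c and intel_runtime_pm.c also carry DC-state and DMC firmware handling:

/* Hypothetical pairing of the new BXT display-core hooks. */
static void bxt_suspend_display_sketch(struct drm_i915_private *dev_priv)
{
	bxt_display_core_uninit(dev_priv);	/* PHYs and cdclk down */
}

static void bxt_resume_display_sketch(struct drm_i915_private *dev_priv)
{
	bxt_display_core_init(dev_priv, true);	/* resume = true */

	/* Cross-check whatever state BIOS/firmware left behind. */
	if (!broxton_cdclk_verify_state(dev_priv))
		DRM_ERROR("cdclk HW state mismatch on resume\n");
	broxton_ddi_phy_verify_state(dev_priv);
}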
......@@ -290,16 +290,26 @@ static bool intel_dsi_compute_config(struct intel_encoder *encoder,
struct intel_dsi *intel_dsi = container_of(encoder, struct intel_dsi,
base);
struct intel_connector *intel_connector = intel_dsi->attached_connector;
struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
const struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
int ret;
DRM_DEBUG_KMS("\n");
pipe_config->has_dsi_encoder = true;
if (fixed_mode)
if (fixed_mode) {
intel_fixed_panel_mode(fixed_mode, adjusted_mode);
if (HAS_GMCH_DISPLAY(dev_priv))
intel_gmch_panel_fitting(crtc, pipe_config,
intel_connector->panel.fitting_mode);
else
intel_pch_panel_fitting(crtc, pipe_config,
intel_connector->panel.fitting_mode);
}
/* DSI uses short packets for sync events, so clear mode flags for DSI */
adjusted_mode->flags = 0;
......@@ -311,6 +321,12 @@ static bool intel_dsi_compute_config(struct intel_encoder *encoder,
pipe_config->cpu_transcoder = TRANSCODER_DSI_A;
}
ret = intel_compute_dsi_pll(encoder, pipe_config);
if (ret)
return false;
pipe_config->clock_set = true;
return true;
}
......@@ -498,14 +514,19 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder)
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
enum pipe pipe = intel_crtc->pipe;
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
enum port port;
u32 tmp;
DRM_DEBUG_KMS("\n");
intel_enable_dsi_pll(encoder);
/*
* The BIOS may leave the PLL in a wonky state where it doesn't
* lock. It needs to be fully powered down to fix it.
*/
intel_disable_dsi_pll(encoder);
intel_enable_dsi_pll(encoder, crtc->config);
intel_dsi_prepare(encoder);
/* Panel Enable over CRC PMIC */
......@@ -515,19 +536,7 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder)
msleep(intel_dsi->panel_on_delay);
if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
/*
* Disable DPOunit clock gating, can stall pipe
* and we need DPLL REFA always enabled
*/
tmp = I915_READ(DPLL(pipe));
tmp |= DPLL_REF_CLK_ENABLE_VLV;
I915_WRITE(DPLL(pipe), tmp);
/* update the hw state for DPLL */
intel_crtc->config->dpll_hw_state.dpll =
DPLL_INTEGRATED_REF_CLK_VLV |
DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
/* Disable DPOunit clock gating, can stall pipe */
tmp = I915_READ(DSPCLK_GATE_D);
tmp |= DPOUNIT_CLOCK_GATE_DISABLE;
I915_WRITE(DSPCLK_GATE_D, tmp);
......@@ -679,11 +688,16 @@ static void intel_dsi_post_disable(struct intel_encoder *encoder)
drm_panel_unprepare(intel_dsi->panel);
msleep(intel_dsi->panel_off_delay);
msleep(intel_dsi->panel_pwr_cycle_delay);
/* Panel Disable over CRC PMIC */
if (intel_dsi->gpio_panel)
gpiod_set_value_cansleep(intel_dsi->gpio_panel, 0);
/*
* FIXME As we do with eDP, just make a note of the time here
* and perform the wait before the next panel power on.
*/
msleep(intel_dsi->panel_pwr_cycle_delay);
}
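The FIXME above points at the technique eDP already uses: record when panel power went off and defer the wait until the next power-on actually needs it. A minimal sketch of that idea follows; the panel_power_off_time timestamp is a hypothetical addition for DSI (eDP keeps an equivalent of its own):

/* Sketch of the deferred power-cycle wait the FIXME refers to. */
static void dsi_panel_power_off_sketch(ktime_t *panel_power_off_time)
{
	/* ... panel disable sequence ... */
	*panel_power_off_time = ktime_get_boottime();
}

static void dsi_panel_power_on_sketch(struct intel_dsi *intel_dsi,
				      ktime_t panel_power_off_time)
{
	s64 elapsed_ms = ktime_ms_delta(ktime_get_boottime(),
					panel_power_off_time);
	s64 remaining = intel_dsi->panel_pwr_cycle_delay - elapsed_ms;

	/* Sleep only for whatever is left of the cycle delay. */
	if (remaining > 0)
		msleep((unsigned int)remaining);

	/* ... panel enable sequence ... */
}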
static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
......@@ -716,11 +730,12 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port);
bool enabled = I915_READ(ctrl_reg) & DPI_ENABLE;
/* Due to some hardware limitations on BYT, MIPI Port C DPI
* Enable bit does not get set. To check whether DSI Port C
* was enabled in BIOS, check the Pipe B enable bit
/*
* Due to some hardware limitations on VLV/CHV, the DPI enable
* bit in port C control register does not get set. As a
* workaround, check pipe B conf instead.
*/
if (IS_VALLEYVIEW(dev) && port == PORT_C)
if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) && port == PORT_C)
enabled = I915_READ(PIPECONF(PIPE_B)) & PIPECONF_ENABLE;
/* Try command mode if video mode not enabled */
......@@ -826,13 +841,8 @@ static void intel_dsi_get_config(struct intel_encoder *encoder,
if (IS_BROXTON(dev))
bxt_dsi_get_pipe_config(encoder, pipe_config);
/*
* DPLL_MD is not used in case of DSI, reading will get some default value
* set dpll_md = 0
*/
pipe_config->dpll_hw_state.dpll_md = 0;
pclk = intel_dsi_get_pclk(encoder, pipe_config->pipe_bpp);
pclk = intel_dsi_get_pclk(encoder, pipe_config->pipe_bpp,
pipe_config);
if (!pclk)
return;
......@@ -845,7 +855,7 @@ intel_dsi_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct intel_connector *intel_connector = to_intel_connector(connector);
struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
const struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
DRM_DEBUG_KMS("\n");
......@@ -1183,6 +1193,48 @@ static int intel_dsi_get_modes(struct drm_connector *connector)
return 1;
}
static int intel_dsi_set_property(struct drm_connector *connector,
struct drm_property *property,
uint64_t val)
{
struct drm_device *dev = connector->dev;
struct intel_connector *intel_connector = to_intel_connector(connector);
struct drm_crtc *crtc;
int ret;
ret = drm_object_property_set_value(&connector->base, property, val);
if (ret)
return ret;
if (property == dev->mode_config.scaling_mode_property) {
if (val == DRM_MODE_SCALE_NONE) {
DRM_DEBUG_KMS("no scaling not supported\n");
return -EINVAL;
}
if (HAS_GMCH_DISPLAY(dev) &&
val == DRM_MODE_SCALE_CENTER) {
DRM_DEBUG_KMS("centering not supported\n");
return -EINVAL;
}
if (intel_connector->panel.fitting_mode == val)
return 0;
intel_connector->panel.fitting_mode = val;
}
crtc = intel_attached_encoder(connector)->base.crtc;
if (crtc && crtc->state->enable) {
/*
* If the CRTC is enabled, the display will be changed
* according to the new panel fitting mode.
*/
intel_crtc_restore_mode(crtc);
}
return 0;
}
static void intel_dsi_connector_destroy(struct drm_connector *connector)
{
struct intel_connector *intel_connector = to_intel_connector(connector);
......@@ -1225,11 +1277,25 @@ static const struct drm_connector_funcs intel_dsi_connector_funcs = {
.detect = intel_dsi_detect,
.destroy = intel_dsi_connector_destroy,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = intel_dsi_set_property,
.atomic_get_property = intel_connector_atomic_get_property,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
};
static void intel_dsi_add_properties(struct intel_connector *connector)
{
struct drm_device *dev = connector->base.dev;
if (connector->panel.fixed_mode) {
drm_mode_create_scaling_mode_property(dev);
drm_object_attach_property(&connector->base.base,
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_ASPECT);
connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
}
}
void intel_dsi_init(struct drm_device *dev)
{
struct intel_dsi *intel_dsi;
......@@ -1353,8 +1419,6 @@ void intel_dsi_init(struct drm_device *dev)
intel_connector_attach_encoder(intel_connector, intel_encoder);
drm_connector_register(connector);
drm_panel_attach(intel_dsi->panel, connector);
mutex_lock(&dev->mode_config.mutex);
......@@ -1373,6 +1437,11 @@ void intel_dsi_init(struct drm_device *dev)
}
intel_panel_init(&intel_connector->panel, fixed_mode, NULL);
intel_dsi_add_properties(intel_connector);
drm_connector_register(connector);
intel_panel_setup_backlight(connector, INVALID_PIPE);
return;
......
......@@ -127,11 +127,15 @@ static inline struct intel_dsi *enc_to_intel_dsi(struct drm_encoder *encoder)
}
bool intel_dsi_pll_is_enabled(struct drm_i915_private *dev_priv);
extern void intel_enable_dsi_pll(struct intel_encoder *encoder);
extern void intel_disable_dsi_pll(struct intel_encoder *encoder);
extern u32 intel_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp);
extern void intel_dsi_reset_clocks(struct intel_encoder *encoder,
enum port port);
int intel_compute_dsi_pll(struct intel_encoder *encoder,
struct intel_crtc_state *config);
void intel_enable_dsi_pll(struct intel_encoder *encoder,
const struct intel_crtc_state *config);
void intel_disable_dsi_pll(struct intel_encoder *encoder);
u32 intel_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
struct intel_crtc_state *config);
void intel_dsi_reset_clocks(struct intel_encoder *encoder,
enum port port);
struct drm_panel *vbt_panel_init(struct intel_dsi *intel_dsi, u16 panel_id);
enum mipi_dsi_pixel_format pixel_format_from_register_bits(u32 fmt);
......
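Taken together, the reworked prototypes split DSI PLL handling into a state-computation step and a hardware-programming step. The sketch below is condensed from the intel_dsi.c hunks above, with error handling trimmed and the wrapper names invented for illustration:

/* 1. Atomic check: pick dividers, store them in the crtc state. */
static bool dsi_compute_clock_sketch(struct intel_encoder *encoder,
				     struct intel_crtc_state *pipe_config)
{
	if (intel_compute_dsi_pll(encoder, pipe_config))
		return false;			/* no usable divider */

	pipe_config->clock_set = true;		/* dsi_pll.ctrl/div valid */
	return true;
}

/* 2. Modeset enable: program the PLL from that precomputed state,
 * bouncing it first in case the BIOS left it unable to lock. */
static void dsi_enable_clock_sketch(struct intel_encoder *encoder,
				    const struct intel_crtc_state *config)
{
	intel_disable_dsi_pll(encoder);
	intel_enable_dsi_pll(encoder, config);
}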
......@@ -27,8 +27,34 @@
#include "intel_guc_fwif.h"
#include "i915_guc_reg.h"
struct drm_i915_gem_request;
/*
* This structure primarily describes the GEM object shared with the GuC.
* The GEM object is held for the entire lifetime of our interaction with
* the GuC, being allocated before the GuC is loaded with its firmware.
* Because there's no way to update the address used by the GuC after
* initialisation, the shared object must stay pinned into the GGTT as
* long as the GuC is in use. We also keep the first page (only) mapped
* into kernel address space, as it includes shared data that must be
* updated on every request submission.
*
* The single GEM object described here is actually made up of several
* separate areas, as far as the GuC is concerned. The first page (kept
* kmap'd) includes the "process descriptor" which holds sequence data for
* the doorbell, and one cacheline which actually *is* the doorbell; a
* write to this will "ring the doorbell" (i.e. send an interrupt to the
* GuC). The subsequent pages of the client object constitute the work
* queue (a circular array of work items), again described in the process
* descriptor. Work queue pages are mapped momentarily as required.
*
* Finally, we also keep a few statistics here, including the number of
* submissions to each engine, and a record of the last submission failure
* (if any).
*/
struct i915_guc_client {
struct drm_i915_gem_object *client_obj;
void *client_base; /* first page (only) of above */
struct intel_context *owner;
struct intel_guc *guc;
uint32_t priority;
......@@ -43,13 +69,14 @@ struct i915_guc_client {
uint32_t wq_offset;
uint32_t wq_size;
uint32_t wq_tail;
uint32_t wq_head;
uint32_t unused; /* Was 'wq_head' */
/* GuC submission statistics & status */
uint64_t submissions[GUC_MAX_ENGINES_NUM];
uint32_t q_fail;
uint32_t b_fail;
int retcode;
int spare; /* pad to 32 DWords */
};
enum intel_guc_fw_status {
......
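To make the layout in the comment above concrete: the first page of the client object holds the process descriptor and, in a separate cacheline, the doorbell, while later pages hold the work queue. A rough sketch of a submission through the kmap'd page follows; it is simplified (the driver's actual doorbell code updates the cookie with a cmpxchg loop), and the *_offset fields used here are part of i915_guc_client but fall in the elided middle of the hunk above:

/* Sketch: publish new work, then ring the doorbell. */
static void guc_submit_sketch(struct i915_guc_client *client)
{
	struct guc_process_desc *desc = client->client_base +
					client->proc_desc_offset;
	struct guc_doorbell_info *db = client->client_base +
				       client->doorbell_offset;

	/* The work item was already written into the WQ pages (mapped
	 * momentarily); publish the new tail via the descriptor ... */
	desc->tail = client->wq_tail;

	/* ... then touch the doorbell cacheline to interrupt the GuC. */
	db->cookie++;
}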
......@@ -1412,8 +1412,16 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
hdmi_to_dig_port(intel_hdmi));
}
if (!live_status)
DRM_DEBUG_KMS("Live status not up!");
if (!live_status) {
DRM_DEBUG_KMS("HDMI live status down\n");
/*
* Live status register is not reliable on all intel platforms.
* So consider live_status only for certain platforms, for
* others, read EDID to determine presence of sink.
*/
if (INTEL_INFO(dev_priv)->gen < 7 || IS_IVYBRIDGE(dev_priv))
live_status = true;
}
intel_hdmi_unset_edid(connector);
......