Commit d3e7a0da authored by Dave Airlie

Merge tag 'drm-intel-next-2015-01-17' of git://anongit.freedesktop.org/drm-intel into drm-next

- refactor i915/snd-hda interaction to use the component framework (Imre)
- psr cleanups and small fixes (Rodrigo)
- a few perf w/a from Ken Graunke
- switch to atomic plane helpers (Matt Roper)
- wc mmap support (Chris Wilson & Akash Goel)
- smaller things all over

* tag 'drm-intel-next-2015-01-17' of git://anongit.freedesktop.org/drm-intel: (40 commits)
  drm/i915: Update DRIVER_DATE to 20150117
  i915: reuse %ph to dump small buffers
  drm/i915: Ensure the HiZ RAW Stall Optimization is on for Cherryview.
  drm/i915: Enable the HiZ RAW Stall Optimization on Broadwell.
  drm/i915: PSR link standby at debugfs
  drm/i915: group link_standby setup and let this info visible everywhere.
  drm/i915: Add missing vbt check.
  drm/i915: PSR HSW/BDW: Fix inverted logic at sink main_link_active bit.
  drm/i915: PSR VLV/CHV: Remove condition checks that only applies to Haswell.
  drm/i915: VLV/CHV PSR needs to exit PSR on every flush.
  drm/i915: Fix kerneldoc for i915 atomic plane code
  drm/i915: Don't pretend SDVO hotplug works on 915
  drm/i915: Don't register HDMI connectors for eDP ports on VLV/CHV
  drm/i915: Remove I915_HAS_HOTPLUG() check from i915_hpd_irq_setup()
  drm/i915: Make hpd arrays big enough to avoid out of bounds access
  Revert "drm/i915/chv: Use timeout mode for RC6 on chv"
  drm/i915: Improve HiZ throughput on Cherryview.
  drm/i915: Reset CSB read pointer in ring init
  drm/i915: Drop unused position fields (v2)
  drm/i915: Move to atomic plane helpers (v9)
  ...
parents e4514003 0a0c0018
......@@ -4017,6 +4017,11 @@ int num_ioctls;</synopsis>
framebuffer compression and panel self refresh.
</para>
</sect2>
<sect2>
<title>Atomic Plane Helpers</title>
!Pdrivers/gpu/drm/i915/intel_atomic_plane.c atomic plane helpers
!Idrivers/gpu/drm/i915/intel_atomic_plane.c
</sect2>
<sect2>
<title>Output Probing</title>
<para>
......@@ -4159,6 +4164,17 @@ int num_ioctls;</synopsis>
!Pdrivers/gpu/drm/i915/i915_gem_gtt.c Global GTT views
!Idrivers/gpu/drm/i915/i915_gem_gtt.c
</sect2>
<sect2>
<title>Buffer Object Eviction</title>
<para>
This section documents the interface functions for evicting buffer
objects to make space available in the virtual gpu address spaces.
Note that this is mostly orthogonal to shrinking buffer object
caches, whose goal is to make main memory (shared with the gpu
through the unified memory architecture) available.
</para>
!Idrivers/gpu/drm/i915/i915_gem_evict.c
</sect2>
</sect1>
<sect1>
......
......@@ -66,6 +66,7 @@ i915-y += dvo_ch7017.o \
dvo_ns2501.o \
dvo_sil164.o \
dvo_tfp410.o \
intel_atomic_plane.o \
intel_crt.o \
intel_ddi.o \
intel_dp.o \
......
......@@ -2248,6 +2248,9 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
}
seq_puts(m, "\n");
seq_printf(m, "Link standby: %s\n",
yesno((bool)dev_priv->psr.link_standby));
/* CHV PSR has no kind of performance counter */
if (HAS_PSR(dev) && HAS_DDI(dev)) {
psrperf = I915_READ(EDP_PSR_PERF_CNT(dev)) &
......
......@@ -143,6 +143,9 @@ static int i915_getparam(struct drm_device *dev, void *data,
case I915_PARAM_HAS_COHERENT_PHYS_GTT:
value = 1;
break;
case I915_PARAM_MMAP_VERSION:
value = 1;
break;
default:
DRM_DEBUG("Unknown parameter %d\n", param->param);
return -EINVAL;
......@@ -830,6 +833,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
intel_runtime_pm_enable(dev_priv);
i915_audio_component_init(dev_priv);
return 0;
out_power_well:
......@@ -870,6 +875,8 @@ int i915_driver_unload(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
i915_audio_component_cleanup(dev_priv);
ret = i915_gem_suspend(dev);
if (ret) {
DRM_ERROR("failed to idle hardware: %d\n", ret);
......@@ -1063,6 +1070,8 @@ const struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_get_reset_stats_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
};
int i915_max_ioctl = ARRAY_SIZE(i915_ioctls);
......
......@@ -942,8 +942,7 @@ static int i915_pm_suspend(struct device *dev)
static int i915_pm_suspend_late(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct drm_device *drm_dev = pci_get_drvdata(pdev);
struct drm_device *drm_dev = dev_to_i915(dev)->dev;
/*
* We have a suspend ordering issue with the snd-hda driver also
......@@ -962,8 +961,7 @@ static int i915_pm_suspend_late(struct device *dev)
static int i915_pm_resume_early(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct drm_device *drm_dev = pci_get_drvdata(pdev);
struct drm_device *drm_dev = dev_to_i915(dev)->dev;
if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
......@@ -973,8 +971,7 @@ static int i915_pm_resume_early(struct device *dev)
static int i915_pm_resume(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct drm_device *drm_dev = pci_get_drvdata(pdev);
struct drm_device *drm_dev = dev_to_i915(dev)->dev;
if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
......
......@@ -55,7 +55,7 @@
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
#define DRIVER_DATE "20141219"
#define DRIVER_DATE "20150117"
#undef WARN_ON
/* Many gcc versions seem to not see through this and fall over :( */
......@@ -83,7 +83,7 @@
int __ret_warn_on = !!(condition); \
if (unlikely(__ret_warn_on)) { \
if (i915.verbose_state_checks) \
__WARN_printf(format); \
WARN(1, format); \
else \
DRM_ERROR(format); \
} \
......@@ -94,7 +94,7 @@
int __ret_warn_on = !!(condition); \
if (unlikely(__ret_warn_on)) { \
if (i915.verbose_state_checks) \
__WARN_printf("WARN_ON(" #condition ")\n"); \
WARN(1, "WARN_ON(" #condition ")\n"); \
else \
DRM_ERROR("WARN_ON(" #condition ")\n"); \
} \
......@@ -678,6 +678,11 @@ struct i915_ctx_hang_stats {
/* Time when this context was last blamed for a GPU reset */
unsigned long guilty_ts;
/* If the context causes a second GPU hang within this time,
* it is permanently banned from submitting any more work.
*/
unsigned long ban_period_seconds;
/* This context is banned from submitting more work */
bool banned;
};
......@@ -784,6 +789,7 @@ struct i915_psr {
bool active;
struct delayed_work work;
unsigned busy_frontbuffer_bits;
bool link_standby;
};
enum intel_pch {
......@@ -1409,7 +1415,6 @@ struct intel_vbt_data {
bool present;
bool active_low_pwm;
u8 min_brightness; /* min_brightness/255 of max */
u8 controller; /* brightness controller number */
} backlight;
/* MIPI DSI */
......@@ -1768,6 +1773,9 @@ struct drm_i915_private {
struct drm_property *broadcast_rgb_property;
struct drm_property *force_audio_property;
/* hda/i915 audio component */
bool audio_component_registered;
uint32_t hw_context_size;
struct list_head context_list;
......@@ -1853,6 +1861,11 @@ static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
return dev->dev_private;
}
static inline struct drm_i915_private *dev_to_i915(struct device *dev)
{
return to_i915(dev_get_drvdata(dev));
}
/* Iterate over initialised rings */
#define for_each_ring(ring__, dev_priv__, i__) \
for ((i__) = 0; (i__) < I915_NUM_RINGS; (i__)++) \
......@@ -2892,6 +2905,10 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
/* i915_gem_evict.c */
int __must_check i915_gem_evict_something(struct drm_device *dev,
......
......@@ -153,12 +153,6 @@ int i915_mutex_lock_interruptible(struct drm_device *dev)
return 0;
}
static inline bool
i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
{
return i915_gem_obj_bound_any(obj) && !obj->active;
}
int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
......@@ -1487,18 +1481,10 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
if (ret)
goto unref;
if (read_domains & I915_GEM_DOMAIN_GTT) {
if (read_domains & I915_GEM_DOMAIN_GTT)
ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
/* Silently promote "you're not bound, there was nothing to do"
* to success, since the client was just asking us to
* make sure everything was done.
*/
if (ret == -EINVAL)
ret = 0;
} else {
else
ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
}
unref:
drm_gem_object_unreference(&obj->base);
......@@ -1563,6 +1549,12 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
struct drm_gem_object *obj;
unsigned long addr;
if (args->flags & ~(I915_MMAP_WC))
return -EINVAL;
if (args->flags & I915_MMAP_WC && !cpu_has_pat)
return -ENODEV;
obj = drm_gem_object_lookup(dev, file, args->handle);
if (obj == NULL)
return -ENOENT;
......@@ -1578,6 +1570,19 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
addr = vm_mmap(obj->filp, 0, args->size,
PROT_READ | PROT_WRITE, MAP_SHARED,
args->offset);
if (args->flags & I915_MMAP_WC) {
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
down_write(&mm->mmap_sem);
vma = find_vma(mm, addr);
if (vma)
vma->vm_page_prot =
pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
else
addr = -ENOMEM;
up_write(&mm->mmap_sem);
}
drm_gem_object_unreference_unlocked(obj);
if (IS_ERR((void *)addr))
return addr;
......@@ -2529,7 +2534,8 @@ static bool i915_context_is_banned(struct drm_i915_private *dev_priv,
if (ctx->hang_stats.banned)
return true;
if (elapsed <= DRM_I915_CTX_BAN_PERIOD) {
if (ctx->hang_stats.ban_period_seconds &&
elapsed <= ctx->hang_stats.ban_period_seconds) {
if (!i915_gem_context_is_default(ctx)) {
DRM_DEBUG("context hanging too fast, banning!\n");
return true;
......@@ -3698,15 +3704,10 @@ i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
int
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
{
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
struct i915_vma *vma = i915_gem_obj_to_ggtt(obj);
uint32_t old_write_domain, old_read_domains;
struct i915_vma *vma;
int ret;
/* Not valid to be called on unbound objects. */
if (vma == NULL)
return -EINVAL;
if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
return 0;
......@@ -3715,6 +3716,19 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
return ret;
i915_gem_object_retire(obj);
/* Flush and acquire obj->pages so that we are coherent through
* direct access in memory with previous cached writes through
* shmemfs and that our cache domain tracking remains valid.
* For example, if the obj->filp was moved to swap without us
* being notified and releasing the pages, we would mistakenly
* continue to assume that the obj remained out of the CPU cached
* domain.
*/
ret = i915_gem_object_get_pages(obj);
if (ret)
return ret;
i915_gem_object_flush_cpu_write_domain(obj, false);
/* Serialise direct access to this object with the barriers for
......@@ -3746,9 +3760,10 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
old_write_domain);
/* And bump the LRU for this access */
if (i915_gem_object_is_inactive(obj))
vma = i915_gem_obj_to_ggtt(obj);
if (vma && drm_mm_node_allocated(&vma->node) && !obj->active)
list_move_tail(&vma->mm_list,
&dev_priv->gtt.base.inactive_list);
&to_i915(obj->base.dev)->gtt.base.inactive_list);
return 0;
}
......
......@@ -222,6 +222,8 @@ __create_hw_context(struct drm_device *dev,
* is no remap info, it will be a NOP. */
ctx->remap_slice = (1 << NUM_L3_SLICES(dev)) - 1;
ctx->hang_stats.ban_period_seconds = DRM_I915_CTX_BAN_PERIOD;
return ctx;
err_out:
......@@ -792,3 +794,72 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
DRM_DEBUG_DRIVER("HW context %d destroyed\n", args->ctx_id);
return 0;
}
int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_i915_file_private *file_priv = file->driver_priv;
struct drm_i915_gem_context_param *args = data;
struct intel_context *ctx;
int ret;
ret = i915_mutex_lock_interruptible(dev);
if (ret)
return ret;
ctx = i915_gem_context_get(file_priv, args->ctx_id);
if (IS_ERR(ctx)) {
mutex_unlock(&dev->struct_mutex);
return PTR_ERR(ctx);
}
args->size = 0;
switch (args->param) {
case I915_CONTEXT_PARAM_BAN_PERIOD:
args->value = ctx->hang_stats.ban_period_seconds;
break;
default:
ret = -EINVAL;
break;
}
mutex_unlock(&dev->struct_mutex);
return ret;
}
int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_i915_file_private *file_priv = file->driver_priv;
struct drm_i915_gem_context_param *args = data;
struct intel_context *ctx;
int ret;
ret = i915_mutex_lock_interruptible(dev);
if (ret)
return ret;
ctx = i915_gem_context_get(file_priv, args->ctx_id);
if (IS_ERR(ctx)) {
mutex_unlock(&dev->struct_mutex);
return PTR_ERR(ctx);
}
switch (args->param) {
case I915_CONTEXT_PARAM_BAN_PERIOD:
if (args->size)
ret = -EINVAL;
else if (args->value < ctx->hang_stats.ban_period_seconds &&
!capable(CAP_SYS_ADMIN))
ret = -EPERM;
else
ctx->hang_stats.ban_period_seconds = args->value;
break;
default:
ret = -EINVAL;
break;
}
mutex_unlock(&dev->struct_mutex);
return ret;
}
......@@ -50,11 +50,12 @@ mark_free(struct i915_vma *vma, struct list_head *unwind)
* i915_gem_evict_something - Evict vmas to make room for binding a new one
* @dev: drm_device
* @vm: address space to evict from
* @size: size of the desired free space
* @min_size: size of the desired free space
* @alignment: alignment constraint of the desired free space
* @cache_level: cache_level for the desired space
* @mappable: whether the free space must be mappable
* @nonblocking: whether evicting active objects is allowed or not
* @start: start (inclusive) of the range from which to evict objects
* @end: end (exclusive) of the range from which to evict objects
* @flags: additional flags to control the eviction algorithm
*
* This function will try to evict vmas until a free space satisfying the
* requirements is found. Callers must check first whether any such hole exists
......@@ -196,7 +197,6 @@ i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
/**
* i915_gem_evict_vm - Evict all idle vmas from a vm
*
* @vm: Address space to cleanse
* @do_idle: Boolean directing whether to idle first.
*
......@@ -214,6 +214,7 @@ int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
struct i915_vma *vma, *next;
int ret;
WARN_ON(!mutex_is_locked(&vm->dev->struct_mutex));
trace_i915_gem_evict_vm(vm);
if (do_idle) {
......@@ -222,6 +223,8 @@ int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
return ret;
i915_gem_retire_requests(vm->dev);
WARN_ON(!list_empty(&vm->active_list));
}
list_for_each_entry_safe(vma, next, &vm->inactive_list, mm_list)
......
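To make the documented eviction entry point concrete, here is a small illustrative sketch (not part of this merge; the helper name is made up). It follows the i915_gem_evict_something() kerneldoc above, assuming the signature in this kernel and that vm->total covers the whole address space:

#include "i915_drv.h"

/* Hypothetical caller: evict vmas until a hole of at least 'size' bytes
 * with the given alignment exists anywhere in 'vm' (start = 0,
 * end = vm->total). With flags == 0 active vmas may be evicted too,
 * after waiting for them to idle. */
static int example_make_room(struct drm_device *dev,
			     struct i915_address_space *vm,
			     u32 size, u32 alignment)
{
	return i915_gem_evict_something(dev, vm, size, alignment,
					I915_CACHE_NONE, 0, vm->total, 0);
}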
......@@ -1081,6 +1081,7 @@ i915_gem_execbuffer_parse(struct intel_engine_cs *ring,
{
struct drm_i915_private *dev_priv = to_i915(batch_obj->base.dev);
struct drm_i915_gem_object *shadow_batch_obj;
bool need_reloc = false;
int ret;
shadow_batch_obj = i915_gem_batch_pool_get(&dev_priv->mm.batch_pool,
......@@ -1106,6 +1107,7 @@ i915_gem_execbuffer_parse(struct intel_engine_cs *ring,
vma->exec_entry = shadow_exec_entry;
vma->exec_entry->flags = __EXEC_OBJECT_PURGEABLE;
drm_gem_object_reference(&shadow_batch_obj->base);
i915_gem_execbuffer_reserve_vma(vma, ring, &need_reloc);
list_add_tail(&vma->exec_list, &eb->vmas);
shadow_batch_obj->base.pending_read_domains =
......
......@@ -45,7 +45,7 @@
* and related files, but that will be described in separate chapters.
*/
static const u32 hpd_ibx[] = {
static const u32 hpd_ibx[HPD_NUM_PINS] = {
[HPD_CRT] = SDE_CRT_HOTPLUG,
[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
......@@ -53,7 +53,7 @@ static const u32 hpd_ibx[] = {
[HPD_PORT_D] = SDE_PORTD_HOTPLUG
};
static const u32 hpd_cpt[] = {
static const u32 hpd_cpt[HPD_NUM_PINS] = {
[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
......@@ -61,7 +61,7 @@ static const u32 hpd_cpt[] = {
[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};
static const u32 hpd_mask_i915[] = {
static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
[HPD_CRT] = CRT_HOTPLUG_INT_EN,
[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
......@@ -70,7 +70,7 @@ static const u32 hpd_mask_i915[] = {
[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};
static const u32 hpd_status_g4x[] = {
static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
......@@ -79,7 +79,7 @@ static const u32 hpd_status_g4x[] = {
[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};
static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
static const u32 hpd_status_i915[HPD_NUM_PINS] = { /* i915 and valleyview are the same */
[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
......@@ -1522,7 +1522,7 @@ static inline enum port get_port_from_pin(enum hpd_pin pin)
static inline void intel_hpd_irq_handler(struct drm_device *dev,
u32 hotplug_trigger,
u32 dig_hotplug_reg,
const u32 *hpd)
const u32 hpd[HPD_NUM_PINS])
{
struct drm_i915_private *dev_priv = dev->dev_private;
int i;
......@@ -4145,26 +4145,24 @@ static void i915_hpd_irq_setup(struct drm_device *dev)
assert_spin_locked(&dev_priv->irq_lock);
if (I915_HAS_HOTPLUG(dev)) {
hotplug_en = I915_READ(PORT_HOTPLUG_EN);
hotplug_en &= ~HOTPLUG_INT_EN_MASK;
/* Note HDMI and DP share hotplug bits */
/* enable bits are the same for all generations */
for_each_intel_encoder(dev, intel_encoder)
if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin];
/* Programming the CRT detection parameters tends
to generate a spurious hotplug event about three
seconds later. So just do it once.
*/
if (IS_G4X(dev))
hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK;
hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
/* Ignore TV since it's buggy */
I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
}
hotplug_en = I915_READ(PORT_HOTPLUG_EN);
hotplug_en &= ~HOTPLUG_INT_EN_MASK;
/* Note HDMI and DP share hotplug bits */
/* enable bits are the same for all generations */
for_each_intel_encoder(dev, intel_encoder)
if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin];
/* Programming the CRT detection parameters tends
to generate a spurious hotplug event about three
seconds later. So just do it once.
*/
if (IS_G4X(dev))
hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK;
hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
/* Ignore TV since it's buggy */
I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
}
static irqreturn_t i965_irq_handler(int irq, void *arg)
......@@ -4428,14 +4426,14 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
dev->driver->irq_postinstall = i915_irq_postinstall;
dev->driver->irq_uninstall = i915_irq_uninstall;
dev->driver->irq_handler = i915_irq_handler;
dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
} else {
dev->driver->irq_preinstall = i965_irq_preinstall;
dev->driver->irq_postinstall = i965_irq_postinstall;
dev->driver->irq_uninstall = i965_irq_uninstall;
dev->driver->irq_handler = i965_irq_handler;
dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
}
if (I915_HAS_HOTPLUG(dev_priv))
dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
dev->driver->enable_vblank = i915_enable_vblank;
dev->driver->disable_vblank = i915_disable_vblank;
}
......
......@@ -5202,6 +5202,9 @@ enum punit_power_well {
#define COMMON_SLICE_CHICKEN2 0x7014
# define GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE (1<<0)
#define HIZ_CHICKEN 0x7018
# define CHV_HZ_8X8_MODE_IN_1X (1<<15)
#define GEN7_L3SQCREG1 0xB010
#define VLV_B0_WA_L3SQCREG1_VALUE 0x00D30000
......@@ -6167,6 +6170,7 @@ enum punit_power_well {
#define HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE (1 << 6)
#define HALF_SLICE_CHICKEN3 0xe184
#define HSW_SAMPLE_C_PERFORMANCE (1<<9)
#define GEN8_CENTROID_PIXEL_OPT_DIS (1<<8)
#define GEN8_SAMPLER_POWER_BYPASS_DIS (1<<1)
......
/*
* Copyright © 2014 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/**
* DOC: atomic plane helpers
*
* The functions here are used by the atomic plane helper functions to
* implement legacy plane updates (i.e., drm_plane->update_plane() and
* drm_plane->disable_plane()). This allows plane updates to use the
* atomic state infrastructure and perform plane updates as separate
* prepare/check/commit/cleanup steps.
*/
#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_plane_helper.h>
#include "intel_drv.h"
/**
* intel_plane_duplicate_state - duplicate plane state
* @plane: drm plane
*
* Allocates and returns a copy of the plane state (both common and
* Intel-specific) for the specified plane.
*
* Returns: The newly allocated plane state, or NULL on failure.
*/
struct drm_plane_state *
intel_plane_duplicate_state(struct drm_plane *plane)
{
struct intel_plane_state *state;
if (plane->state)
state = kmemdup(plane->state, sizeof(*state), GFP_KERNEL);
else
state = kzalloc(sizeof(*state), GFP_KERNEL);
if (!state)
return NULL;
if (state->base.fb)
drm_framebuffer_reference(state->base.fb);
return &state->base;
}
/**
* intel_plane_destroy_state - destroy plane state
* @plane: drm plane
* @state: state object to destroy
*
* Destroys the plane state (both common and Intel-specific) for the
* specified plane.
*/
void
intel_plane_destroy_state(struct drm_plane *plane,
struct drm_plane_state *state)
{
drm_atomic_helper_plane_destroy_state(plane, state);
}
static int intel_plane_atomic_check(struct drm_plane *plane,
struct drm_plane_state *state)
{
struct drm_crtc *crtc = state->crtc;
struct intel_crtc *intel_crtc;
struct intel_plane *intel_plane = to_intel_plane(plane);
struct intel_plane_state *intel_state = to_intel_plane_state(state);
crtc = crtc ? crtc : plane->crtc;
intel_crtc = to_intel_crtc(crtc);
/*
* The original src/dest coordinates are stored in state->base, but
* we want to keep another copy internal to our driver that we can
* clip/modify ourselves.
*/
intel_state->src.x1 = state->src_x;
intel_state->src.y1 = state->src_y;
intel_state->src.x2 = state->src_x + state->src_w;
intel_state->src.y2 = state->src_y + state->src_h;
intel_state->dst.x1 = state->crtc_x;
intel_state->dst.y1 = state->crtc_y;
intel_state->dst.x2 = state->crtc_x + state->crtc_w;
intel_state->dst.y2 = state->crtc_y + state->crtc_h;
/* Clip all planes to CRTC size, or 0x0 if CRTC is disabled */
intel_state->clip.x1 = 0;
intel_state->clip.y1 = 0;
intel_state->clip.x2 =
intel_crtc->active ? intel_crtc->config.pipe_src_w : 0;
intel_state->clip.y2 =
intel_crtc->active ? intel_crtc->config.pipe_src_h : 0;
/*
* Disabling a plane is always okay; we just need to update
* fb tracking in a special way since cleanup_fb() won't
* get called by the plane helpers.
*/
if (state->fb == NULL && plane->state->fb != NULL) {
/*
* 'prepare' is never called when plane is being disabled, so
* we need to handle frontbuffer tracking as a special case
*/
intel_crtc->atomic.disabled_planes |=
(1 << drm_plane_index(plane));
}
return intel_plane->check_plane(plane, intel_state);
}
static void intel_plane_atomic_update(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
struct intel_plane *intel_plane = to_intel_plane(plane);
struct intel_plane_state *intel_state =
to_intel_plane_state(plane->state);
/* Don't disable an already disabled plane */
if (!plane->state->fb && !old_state->fb)
return;
intel_plane->commit_plane(plane, intel_state);
}
const struct drm_plane_helper_funcs intel_plane_helper_funcs = {
.prepare_fb = intel_prepare_plane_fb,
.cleanup_fb = intel_cleanup_plane_fb,
.atomic_check = intel_plane_atomic_check,
.atomic_update = intel_plane_atomic_update,
};
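As a hedged illustration of how a plane adopts these helpers (the funcs table name is made up; the drm_plane_helper_* transitional helpers and the intel_plane_* functions are the ones declared elsewhere in this series), the legacy update_plane/disable_plane entry points are routed through the DRM transitional helpers, which in turn call the atomic_check/atomic_update hooks above; the plane init path then registers intel_plane_helper_funcs with drm_plane_helper_add():

#include <drm/drm_plane_helper.h>
#include "intel_drv.h"

/* Illustrative funcs table; intel_plane_destroy, intel_plane_duplicate_state
 * and intel_plane_destroy_state are the functions declared in intel_drv.h
 * in this series. */
static const struct drm_plane_funcs example_plane_funcs = {
	.update_plane = drm_plane_helper_update,
	.disable_plane = drm_plane_helper_disable,
	.destroy = intel_plane_destroy,
	.atomic_duplicate_state = intel_plane_duplicate_state,
	.atomic_destroy_state = intel_plane_destroy_state,
};

/* At plane creation time, after drm_universal_plane_init():
 *	drm_plane_helper_add(&intel_plane->base, &intel_plane_helper_funcs);
 */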
......@@ -22,6 +22,9 @@
*/
#include <linux/kernel.h>
#include <linux/component.h>
#include <drm/i915_component.h>
#include "intel_drv.h"
#include <drm/drmP.h>
#include <drm/drm_edid.h>
......@@ -461,3 +464,110 @@ void intel_init_audio(struct drm_device *dev)
dev_priv->display.audio_codec_disable = ilk_audio_codec_disable;
}
}
static void i915_audio_component_get_power(struct device *dev)
{
intel_display_power_get(dev_to_i915(dev), POWER_DOMAIN_AUDIO);
}
static void i915_audio_component_put_power(struct device *dev)
{
intel_display_power_put(dev_to_i915(dev), POWER_DOMAIN_AUDIO);
}
/* Get CDCLK in kHz */
static int i915_audio_component_get_cdclk_freq(struct device *dev)
{
struct drm_i915_private *dev_priv = dev_to_i915(dev);
int ret;
if (WARN_ON_ONCE(!HAS_DDI(dev_priv)))
return -ENODEV;
intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
ret = intel_ddi_get_cdclk_freq(dev_priv);
intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
return ret;
}
static const struct i915_audio_component_ops i915_audio_component_ops = {
.owner = THIS_MODULE,
.get_power = i915_audio_component_get_power,
.put_power = i915_audio_component_put_power,
.get_cdclk_freq = i915_audio_component_get_cdclk_freq,
};
static int i915_audio_component_bind(struct device *i915_dev,
struct device *hda_dev, void *data)
{
struct i915_audio_component *acomp = data;
if (WARN_ON(acomp->ops || acomp->dev))
return -EEXIST;
acomp->ops = &i915_audio_component_ops;
acomp->dev = i915_dev;
return 0;
}
static void i915_audio_component_unbind(struct device *i915_dev,
struct device *hda_dev, void *data)
{
struct i915_audio_component *acomp = data;
acomp->ops = NULL;
acomp->dev = NULL;
}
static const struct component_ops i915_audio_component_bind_ops = {
.bind = i915_audio_component_bind,
.unbind = i915_audio_component_unbind,
};
/**
* i915_audio_component_init - initialize and register the audio component
* @dev_priv: i915 device instance
*
* This will register with the component framework a child component which
* will bind dynamically to the snd_hda_intel driver's corresponding master
* component when the latter is registered. During binding the child
* initializes an instance of struct i915_audio_component which it receives
* from the master. The master can then start to use the interface defined by
* this struct. Each side can break the binding at any point by deregistering
* its own component after which each side's component unbind callback is
* called.
*
* We ignore any error during registration and continue with reduced
* functionality (i.e. without HDMI audio).
*/
void i915_audio_component_init(struct drm_i915_private *dev_priv)
{
int ret;
ret = component_add(dev_priv->dev->dev, &i915_audio_component_bind_ops);
if (ret < 0) {
DRM_ERROR("failed to add audio component (%d)\n", ret);
/* continue with reduced functionality */
return;
}
dev_priv->audio_component_registered = true;
}
/**
* i915_audio_component_cleanup - deregister the audio component
* @dev_priv: i915 device instance
*
* Deregisters the audio component, breaking any existing binding to the
* corresponding snd_hda_intel driver's master component.
*/
void i915_audio_component_cleanup(struct drm_i915_private *dev_priv)
{
if (!dev_priv->audio_component_registered)
return;
component_del(dev_priv->dev->dev, &i915_audio_component_bind_ops);
dev_priv->audio_component_registered = false;
}
......@@ -314,7 +314,6 @@ parse_lfp_backlight(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
{
const struct bdb_lfp_backlight_data *backlight_data;
const struct bdb_lfp_backlight_data_entry *entry;
const struct bdb_lfp_backlight_control_data *bl_ctrl_data;
backlight_data = find_section(bdb, BDB_LVDS_BACKLIGHT);
if (!backlight_data)
......@@ -327,7 +326,6 @@ parse_lfp_backlight(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
}
entry = &backlight_data->data[panel_type];
bl_ctrl_data = &backlight_data->blc_ctl[panel_type];
dev_priv->vbt.backlight.present = entry->type == BDB_BACKLIGHT_TYPE_PWM;
if (!dev_priv->vbt.backlight.present) {
......@@ -339,30 +337,12 @@ parse_lfp_backlight(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
dev_priv->vbt.backlight.pwm_freq_hz = entry->pwm_freq_hz;
dev_priv->vbt.backlight.active_low_pwm = entry->active_low_pwm;
dev_priv->vbt.backlight.min_brightness = entry->min_brightness;
dev_priv->vbt.backlight.controller = 0;
if (bdb->version >= 191) {
dev_priv->vbt.backlight.present =
bl_ctrl_data->pin == BLC_CONTROL_PIN_DDI;
if (!dev_priv->vbt.backlight.present) {
DRM_DEBUG_KMS("BL control pin is not DDI (pin %u)\n",
bl_ctrl_data->pin);
return;
}
if (bl_ctrl_data->controller == 1)
dev_priv->vbt.backlight.controller =
bl_ctrl_data->controller;
}
DRM_DEBUG_KMS("VBT backlight PWM modulation frequency %u Hz, "
"active %s, min brightness %u, level %u\n",
dev_priv->vbt.backlight.pwm_freq_hz,
dev_priv->vbt.backlight.active_low_pwm ? "low" : "high",
dev_priv->vbt.backlight.min_brightness,
backlight_data->level[panel_type]);
DRM_DEBUG_KMS("VBT BL controller %u\n",
dev_priv->vbt.backlight.controller);
}
/* Try to find sdvo panel data */
......
......@@ -402,21 +402,10 @@ struct bdb_lfp_backlight_data_entry {
u8 obsolete3;
} __packed;
#define BLC_CONTROL_PIN_PMIC 0
#define BLC_CONTROL_PIN_LPSS_PWM 1
#define BLC_CONTROL_PIN_DDI 2
#define BLC_CONTROL_PIN_CABC 3
struct bdb_lfp_backlight_control_data {
u8 controller:4;
u8 pin:4;
} __packed;
struct bdb_lfp_backlight_data {
u8 entry_size;
struct bdb_lfp_backlight_data_entry data[16];
u8 level[16];
struct bdb_lfp_backlight_control_data blc_ctl[16];
} __packed;
struct aimdb_header {
......
......@@ -3773,7 +3773,7 @@ intel_dp_check_mst_status(struct intel_dp *intel_dp)
intel_dp_stop_link_train(intel_dp);
}
DRM_DEBUG_KMS("got esi %02x %02x %02x\n", esi[0], esi[1], esi[2]);
DRM_DEBUG_KMS("got esi %3ph\n", esi);
ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
if (handled) {
......@@ -3789,7 +3789,7 @@ intel_dp_check_mst_status(struct intel_dp *intel_dp)
bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
if (bret == true) {
DRM_DEBUG_KMS("got esi2 %02x %02x %02x\n", esi[0], esi[1], esi[2]);
DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
goto go_again;
}
} else
......
......@@ -248,9 +248,13 @@ struct intel_plane_state {
struct drm_rect src;
struct drm_rect dst;
struct drm_rect clip;
struct drm_rect orig_src;
struct drm_rect orig_dst;
bool visible;
/*
* used only for sprite planes to determine when to implicitly
* enable/disable the primary plane
*/
bool hides_primary;
};
struct intel_plane_config {
......@@ -415,6 +419,32 @@ struct skl_pipe_wm {
uint32_t linetime;
};
/*
* Tracking of operations that need to be performed at the beginning/end of an
* atomic commit, outside the atomic section where interrupts are disabled.
* These are generally operations that grab mutexes or might otherwise sleep
* and thus can't be run with interrupts disabled.
*/
struct intel_crtc_atomic_commit {
/* vblank evasion */
bool evade;
unsigned start_vbl_count;
/* Sleepable operations to perform before commit */
bool wait_for_flips;
bool disable_fbc;
bool pre_disable_primary;
bool update_wm;
unsigned disabled_planes;
/* Sleepable operations to perform after commit */
unsigned fb_bits;
bool wait_vblank;
bool update_fbc;
bool post_enable_primary;
unsigned update_sprite_watermarks;
};
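A hedged sketch of how the begin/end halves of a plane commit might consume this bookkeeping (the function names are illustrative; intel_pipe_update_start/end and intel_pre_disable_primary/intel_post_enable_primary are the helpers declared further down in this header; only a few fields are shown, the rest are handled the same way):

/* Illustrative only: sleepable work before the non-sleeping section. */
static void example_begin_crtc_commit(struct intel_crtc *crtc)
{
	if (crtc->atomic.pre_disable_primary)
		intel_pre_disable_primary(&crtc->base);

	/* Enter vblank evasion; no sleeping until the matching end. */
	crtc->atomic.evade =
		intel_pipe_update_start(crtc, &crtc->atomic.start_vbl_count);
}

/* Illustrative only: leave the atomic section, then do sleepable work. */
static void example_finish_crtc_commit(struct intel_crtc *crtc)
{
	if (crtc->atomic.evade)
		intel_pipe_update_end(crtc, crtc->atomic.start_vbl_count);

	if (crtc->atomic.post_enable_primary)
		intel_post_enable_primary(&crtc->base);
}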
struct intel_crtc {
struct drm_crtc base;
enum pipe pipe;
......@@ -468,6 +498,8 @@ struct intel_crtc {
int scanline_offset;
struct intel_mmio_flip mmio_flip;
struct intel_crtc_atomic_commit atomic;
};
struct intel_plane_wm_parameters {
......@@ -485,10 +517,6 @@ struct intel_plane {
struct drm_i915_gem_object *obj;
bool can_scale;
int max_downscale;
int crtc_x, crtc_y;
unsigned int crtc_w, crtc_h;
uint32_t src_x, src_y;
uint32_t src_w, src_h;
unsigned int rotation;
/* Since we need to change the watermarks before/after
......@@ -542,6 +570,7 @@ struct cxsr_latency {
#define to_intel_encoder(x) container_of(x, struct intel_encoder, base)
#define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base)
#define to_intel_plane(x) container_of(x, struct intel_plane, base)
#define to_intel_plane_state(x) container_of(x, struct intel_plane_state, base)
#define intel_fb_obj(x) (x ? to_intel_framebuffer(x)->obj : NULL)
struct intel_hdmi {
......@@ -874,6 +903,8 @@ void intel_fb_obj_flush(struct drm_i915_gem_object *obj, bool retire);
void intel_init_audio(struct drm_device *dev);
void intel_audio_codec_enable(struct intel_encoder *encoder);
void intel_audio_codec_disable(struct intel_encoder *encoder);
void i915_audio_component_init(struct drm_i915_private *dev_priv);
void i915_audio_component_cleanup(struct drm_i915_private *dev_priv);
/* intel_display.c */
bool intel_has_pending_fb_unpin(struct drm_device *dev);
......@@ -1021,6 +1052,7 @@ int intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
uint32_t src_x, uint32_t src_y,
uint32_t src_w, uint32_t src_h);
int intel_disable_plane(struct drm_plane *plane);
void intel_plane_destroy(struct drm_plane *plane);
/* intel_dp_mst.c */
int intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_id);
......@@ -1214,8 +1246,16 @@ int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
bool intel_pipe_update_start(struct intel_crtc *crtc,
uint32_t *start_vbl_count);
void intel_pipe_update_end(struct intel_crtc *crtc, u32 start_vbl_count);
void intel_post_enable_primary(struct drm_crtc *crtc);
void intel_pre_disable_primary(struct drm_crtc *crtc);
/* intel_tv.c */
void intel_tv_init(struct drm_device *dev);
/* intel_atomic.c */
struct drm_plane_state *intel_plane_duplicate_state(struct drm_plane *plane);
void intel_plane_destroy_state(struct drm_plane *plane,
struct drm_plane_state *state);
extern const struct drm_plane_helper_funcs intel_plane_helper_funcs;
#endif /* __INTEL_DRV_H__ */
......@@ -1137,6 +1137,7 @@ static int gen8_init_common_ring(struct intel_engine_cs *ring)
_MASKED_BIT_DISABLE(GFX_REPLAY_MODE) |
_MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE));
POSTING_READ(RING_MODE_GEN7(ring));
ring->next_context_status_buffer = 0;
DRM_DEBUG_DRIVER("Execlists enabled for %s\n", ring->name);
memset(&ring->hangcheck, 0, sizeof(ring->hangcheck));
......@@ -1394,7 +1395,6 @@ static int logical_ring_init(struct drm_device *dev, struct intel_engine_cs *rin
INIT_LIST_HEAD(&ring->execlist_queue);
INIT_LIST_HEAD(&ring->execlist_retired_req_list);
spin_lock_init(&ring->execlist_lock);
ring->next_context_status_buffer = 0;
ret = i915_cmd_parser_init_ring(ring);
if (ret)
......
......@@ -4681,8 +4681,7 @@ static void cherryview_enable_rps(struct drm_device *dev)
I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
I915_WRITE(GEN6_RC_SLEEP, 0);
/* TO threshold set to 1750 us ( 0x557 * 1.28 us) */
I915_WRITE(GEN6_RC6_THRESHOLD, 0x557);
I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */
/* allows RC6 residency counter to work */
I915_WRITE(VLV_COUNTER_CONTROL,
......@@ -4696,7 +4695,7 @@ static void cherryview_enable_rps(struct drm_device *dev)
/* 3: Enable RC6 */
if ((intel_enable_rc6(dev) & INTEL_RC6_ENABLE) &&
(pcbr >> VLV_PCBR_ADDR_SHIFT))
rc6_mode = GEN7_RC_CTL_TO_MODE;
rc6_mode = GEN6_RC_CTL_EI_MODE(1);
I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
......@@ -5974,6 +5973,10 @@ static void haswell_init_clock_gating(struct drm_device *dev)
I915_WRITE(GEN7_GT_MODE,
_MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));
/* WaSampleCChickenBitEnable:hsw */
I915_WRITE(HALF_SLICE_CHICKEN3,
_MASKED_BIT_ENABLE(HSW_SAMPLE_C_PERFORMANCE));
/* WaSwitchSolVfFArbitrationPriority:hsw */
I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
......
......@@ -143,7 +143,6 @@ static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t aux_clock_divider;
int precharge = 0x3;
bool only_standby = dev_priv->vbt.psr.full_link;
static const uint8_t aux_msg[] = {
[0] = DP_AUX_NATIVE_WRITE << 4,
[1] = DP_SET_POWER >> 8,
......@@ -157,16 +156,13 @@ static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
if (IS_BROADWELL(dev) && dig_port->port != PORT_A)
only_standby = true;
/* Enable PSR in sink */
if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT || only_standby)
if (dev_priv->psr.link_standby)
drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
DP_PSR_ENABLE & ~DP_PSR_MAIN_LINK_ACTIVE);
DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE);
else
drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE);
DP_PSR_ENABLE & ~DP_PSR_MAIN_LINK_ACTIVE);
/* Setup AUX registers */
for (i = 0; i < sizeof(aux_msg); i += 4)
......@@ -226,12 +222,8 @@ static void hsw_psr_enable_source(struct intel_dp *intel_dp)
dev_priv->vbt.psr.idle_frames + 1 : 2;
uint32_t val = 0x0;
const uint32_t link_entry_time = EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
bool only_standby = false;
if (IS_BROADWELL(dev) && dig_port->port != PORT_A)
only_standby = true;
if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT || only_standby) {
if (dev_priv->psr.link_standby) {
val |= EDP_PSR_LINK_STANDBY;
val |= EDP_PSR_TP2_TP3_TIME_0us;
val |= EDP_PSR_TP1_TIME_0us;
......@@ -270,22 +262,19 @@ static bool intel_psr_match_conditions(struct intel_dp *intel_dp)
return false;
}
/* Below limitations aren't valid for Broadwell */
if (IS_BROADWELL(dev))
goto out;
if (I915_READ(HSW_STEREO_3D_CTL(intel_crtc->config.cpu_transcoder)) &
S3D_ENABLE) {
if (IS_HASWELL(dev) &&
I915_READ(HSW_STEREO_3D_CTL(intel_crtc->config.cpu_transcoder)) &
S3D_ENABLE) {
DRM_DEBUG_KMS("PSR condition failed: Stereo 3D is Enabled\n");
return false;
}
if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
if (IS_HASWELL(dev) &&
intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
DRM_DEBUG_KMS("PSR condition failed: Interlaced is Enabled\n");
return false;
}
out:
dev_priv->psr.source_ok = true;
return true;
}
......@@ -344,6 +333,13 @@ void intel_psr_enable(struct intel_dp *intel_dp)
if (!intel_psr_match_conditions(intel_dp))
goto unlock;
/* First we check VBT, but we must respect sink and source
* known restrictions */
dev_priv->psr.link_standby = dev_priv->vbt.psr.full_link;
if ((intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT) ||
(IS_BROADWELL(dev) && intel_dig_port->port != PORT_A))
dev_priv->psr.link_standby = true;
dev_priv->psr.busy_frontbuffer_bits = 0;
if (HAS_DDI(dev)) {
......@@ -620,13 +616,11 @@ void intel_psr_flush(struct drm_device *dev,
/*
* On Valleyview and Cherryview we don't use hardware tracking so
* sprite plane updates or cursor moves don't result in a PSR
* any plane updates or cursor moves don't result in a PSR
* invalidating. Which means we need to manually fake this in
* software for all flushes, not just when we've seen a preceding
* invalidation through frontbuffer rendering. */
if (!HAS_DDI(dev) &&
((frontbuffer_bits & INTEL_FRONTBUFFER_SPRITE(pipe)) ||
(frontbuffer_bits & INTEL_FRONTBUFFER_CURSOR(pipe))))
if (!HAS_DDI(dev))
intel_psr_exit(dev);
if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
......
......@@ -796,6 +796,16 @@ static int bdw_init_workarounds(struct intel_engine_cs *ring)
HDC_DONOT_FETCH_MEM_WHEN_MASKED |
(IS_BDW_GT3(dev) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
/* From the Haswell PRM, Command Reference: Registers, CACHE_MODE_0:
* "The Hierarchical Z RAW Stall Optimization allows non-overlapping
* polygons in the same 8x4 pixel/sample area to be processed without
* stalling waiting for the earlier ones to write to Hierarchical Z
* buffer."
*
* This optimization is off by default for Broadwell; turn it on.
*/
WA_CLR_BIT_MASKED(CACHE_MODE_0_GEN7, HIZ_RAW_STALL_OPT_DISABLE);
/* Wa4x4STCOptimizationDisable:bdw */
WA_SET_BIT_MASKED(CACHE_MODE_1,
GEN8_4x4_STC_OPTIMIZATION_DISABLE);
......@@ -836,6 +846,14 @@ static int chv_init_workarounds(struct intel_engine_cs *ring)
HDC_FORCE_NON_COHERENT |
HDC_DONOT_FETCH_MEM_WHEN_MASKED);
/* According to the CACHE_MODE_0 default value documentation, some
* CHV platforms disable this optimization by default. Turn it on.
*/
WA_CLR_BIT_MASKED(CACHE_MODE_0_GEN7, HIZ_RAW_STALL_OPT_DISABLE);
/* Improve HiZ throughput on CHV. */
WA_SET_BIT_MASKED(HIZ_CHICKEN, CHV_HZ_8X8_MODE_IN_1X);
return 0;
}
......
......@@ -31,7 +31,6 @@
#include "i915_drv.h"
#include "intel_drv.h"
#include <drm/i915_powerwell.h>
/**
* DOC: runtime pm
......@@ -50,8 +49,6 @@
* present for a given platform.
*/
static struct i915_power_domains *hsw_pwr;
#define for_each_power_well(i, power_well, domain_mask, power_domains) \
for (i = 0; \
i < (power_domains)->power_well_count && \
......@@ -1071,10 +1068,8 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
*/
if (IS_HASWELL(dev_priv->dev)) {
set_power_wells(power_domains, hsw_power_wells);
hsw_pwr = power_domains;
} else if (IS_BROADWELL(dev_priv->dev)) {
set_power_wells(power_domains, bdw_power_wells);
hsw_pwr = power_domains;
} else if (IS_CHERRYVIEW(dev_priv->dev)) {
set_power_wells(power_domains, chv_power_wells);
} else if (IS_VALLEYVIEW(dev_priv->dev)) {
......@@ -1118,8 +1113,6 @@ void intel_power_domains_fini(struct drm_i915_private *dev_priv)
* the power well is not enabled, so just enable it in case
* we're going to unload/reload. */
intel_display_set_init_power(dev_priv, true);
hsw_pwr = NULL;
}
static void intel_power_domains_resume(struct drm_i915_private *dev_priv)
......@@ -1328,52 +1321,3 @@ void intel_runtime_pm_enable(struct drm_i915_private *dev_priv)
pm_runtime_put_autosuspend(device);
}
/* Display audio driver power well request */
int i915_request_power_well(void)
{
struct drm_i915_private *dev_priv;
if (!hsw_pwr)
return -ENODEV;
dev_priv = container_of(hsw_pwr, struct drm_i915_private,
power_domains);
intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
return 0;
}
EXPORT_SYMBOL_GPL(i915_request_power_well);
/* Display audio driver power well release */
int i915_release_power_well(void)
{
struct drm_i915_private *dev_priv;
if (!hsw_pwr)
return -ENODEV;
dev_priv = container_of(hsw_pwr, struct drm_i915_private,
power_domains);
intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
return 0;
}
EXPORT_SYMBOL_GPL(i915_release_power_well);
/*
* Private interface for the audio driver to get CDCLK in kHz.
*
* Caller must request power well using i915_request_power_well() prior to
* making the call.
*/
int i915_get_cdclk_freq(void)
{
struct drm_i915_private *dev_priv;
if (!hsw_pwr)
return -ENODEV;
dev_priv = container_of(hsw_pwr, struct drm_i915_private,
power_domains);
return intel_ddi_get_cdclk_freq(dev_priv);
}
EXPORT_SYMBOL_GPL(i915_get_cdclk_freq);
......@@ -1617,6 +1617,9 @@ static uint16_t intel_sdvo_get_hotplug_support(struct intel_sdvo *intel_sdvo)
struct drm_device *dev = intel_sdvo->base.base.dev;
uint16_t hotplug;
if (!I915_HAS_HOTPLUG(dev))
return 0;
/* HW Erratum: SDVO Hotplug is broken on all i945G chips, there's noise
* on the line. */
if (IS_I945G(dev) || IS_I945GM(dev))
......
/*
* Copyright © 2014 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#ifndef _I915_COMPONENT_H_
#define _I915_COMPONENT_H_
struct i915_audio_component {
struct device *dev;
const struct i915_audio_component_ops {
struct module *owner;
void (*get_power)(struct device *);
void (*put_power)(struct device *);
int (*get_cdclk_freq)(struct device *);
} *ops;
};
#endif /* _I915_COMPONENT_H_ */
/**************************************************************************
*
* Copyright 2013 Intel Inc.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*
**************************************************************************/
#ifndef _I915_POWERWELL_H_
#define _I915_POWERWELL_H_
/* For use by hda_i915 driver */
extern int i915_request_power_well(void);
extern int i915_release_power_well(void);
extern int i915_get_cdclk_freq(void);
#endif /* _I915_POWERWELL_H_ */
......@@ -224,6 +224,8 @@ typedef struct _drm_i915_sarea {
#define DRM_I915_REG_READ 0x31
#define DRM_I915_GET_RESET_STATS 0x32
#define DRM_I915_GEM_USERPTR 0x33
#define DRM_I915_GEM_CONTEXT_GETPARAM 0x34
#define DRM_I915_GEM_CONTEXT_SETPARAM 0x35
#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
#define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
......@@ -275,6 +277,8 @@ typedef struct _drm_i915_sarea {
#define DRM_IOCTL_I915_REG_READ DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_REG_READ, struct drm_i915_reg_read)
#define DRM_IOCTL_I915_GET_RESET_STATS DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GET_RESET_STATS, struct drm_i915_reset_stats)
#define DRM_IOCTL_I915_GEM_USERPTR DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_USERPTR, struct drm_i915_gem_userptr)
#define DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_GETPARAM, struct drm_i915_gem_context_param)
#define DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_SETPARAM, struct drm_i915_gem_context_param)
/* Allow drivers to submit batchbuffers directly to hardware, relying
* on the security mechanisms provided by hardware.
......@@ -341,6 +345,7 @@ typedef struct drm_i915_irq_wait {
#define I915_PARAM_HAS_WT 27
#define I915_PARAM_CMD_PARSER_VERSION 28
#define I915_PARAM_HAS_COHERENT_PHYS_GTT 29
#define I915_PARAM_MMAP_VERSION 30
typedef struct drm_i915_getparam {
int param;
......@@ -488,6 +493,14 @@ struct drm_i915_gem_mmap {
* This is a fixed-size type for 32/64 compatibility.
*/
__u64 addr_ptr;
/**
* Flags for extended behaviour.
*
* Added in version 2.
*/
__u64 flags;
#define I915_MMAP_WC 0x1
};
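A minimal userspace sketch of the new write-combining CPU mmap path (assuming the libdrm-installed uapi headers, an open i915 DRM fd and a valid GEM handle; the wrapper name is made up). The caller checks I915_PARAM_MMAP_VERSION first, then passes I915_MMAP_WC in the new flags field:

#include <stdint.h>
#include <sys/ioctl.h>
#include <i915_drm.h>

static void *gem_mmap_wc(int fd, uint32_t handle, uint64_t size)
{
	int version = 0;
	struct drm_i915_getparam gp = { .param = I915_PARAM_MMAP_VERSION,
					.value = &version };
	struct drm_i915_gem_mmap arg = { .handle = handle, .size = size,
					 .flags = I915_MMAP_WC };

	/* Kernels without this work don't know I915_PARAM_MMAP_VERSION. */
	if (ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) || version < 1)
		return NULL;

	if (ioctl(fd, DRM_IOCTL_I915_GEM_MMAP, &arg))
		return NULL;

	return (void *)(uintptr_t)arg.addr_ptr;
}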
struct drm_i915_gem_mmap_gtt {
......@@ -1073,4 +1086,12 @@ struct drm_i915_gem_userptr {
__u32 handle;
};
struct drm_i915_gem_context_param {
__u32 ctx_id;
__u32 size;
__u64 param;
#define I915_CONTEXT_PARAM_BAN_PERIOD 0x1
__u64 value;
};
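A matching userspace sketch for the context-param ioctls (same assumptions as above; the function name is made up). Getparam reports the current hang-ban period in seconds; setparam only lets an unprivileged caller raise it, per the CAP_SYS_ADMIN check in i915_gem_context_setparam_ioctl() earlier in this merge:

#include <stdint.h>
#include <sys/ioctl.h>
#include <i915_drm.h>

static int halve_ban_period(int fd, uint32_t ctx_id)
{
	struct drm_i915_gem_context_param p = {
		.ctx_id = ctx_id,
		.param = I915_CONTEXT_PARAM_BAN_PERIOD,
	};

	if (ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &p))
		return -1;

	/* Lowering the period below its current value needs CAP_SYS_ADMIN;
	 * p.size stays 0, as setparam requires for BAN_PERIOD. */
	p.value /= 2;
	return ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
}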
#endif /* _UAPI_I915_DRM_H_ */
......@@ -18,10 +18,12 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/component.h>
#include <drm/i915_component.h>
#include <sound/core.h>
#include <drm/i915_powerwell.h>
#include "hda_priv.h"
#include "hda_i915.h"
#include "hda_intel.h"
/* Intel HSW/BDW display HDA controller Extended Mode registers.
* EM4 (M value) and EM5 (N Value) are used to convert CDClk (Core Display
......@@ -31,32 +33,33 @@
#define AZX_REG_EM4 0x100c
#define AZX_REG_EM5 0x1010
static int (*get_power)(void);
static int (*put_power)(void);
static int (*get_cdclk)(void);
int hda_display_power(bool enable)
int hda_display_power(struct hda_intel *hda, bool enable)
{
if (!get_power || !put_power)
struct i915_audio_component *acomp = &hda->audio_component;
if (!acomp->ops)
return -ENODEV;
pr_debug("HDA display power %s \n",
enable ? "Enable" : "Disable");
dev_dbg(&hda->chip.pci->dev, "display power %s\n",
enable ? "enable" : "disable");
if (enable)
return get_power();
acomp->ops->get_power(acomp->dev);
else
return put_power();
acomp->ops->put_power(acomp->dev);
return 0;
}
void haswell_set_bclk(struct azx *chip)
void haswell_set_bclk(struct hda_intel *hda)
{
int cdclk_freq;
unsigned int bclk_m, bclk_n;
struct i915_audio_component *acomp = &hda->audio_component;
if (!get_cdclk)
if (!acomp->ops)
return;
cdclk_freq = get_cdclk();
cdclk_freq = acomp->ops->get_cdclk_freq(acomp->dev);
switch (cdclk_freq) {
case 337500:
bclk_m = 16;
......@@ -80,51 +83,108 @@ void haswell_set_bclk(struct azx *chip)
break;
}
azx_writew(chip, EM4, bclk_m);
azx_writew(chip, EM5, bclk_n);
azx_writew(&hda->chip, EM4, bclk_m);
azx_writew(&hda->chip, EM5, bclk_n);
}
int hda_i915_init(void)
static int hda_component_master_bind(struct device *dev)
{
int err = 0;
get_power = symbol_request(i915_request_power_well);
if (!get_power) {
pr_warn("hda-i915: get_power symbol get fail\n");
return -ENODEV;
struct snd_card *card = dev_get_drvdata(dev);
struct azx *chip = card->private_data;
struct hda_intel *hda = container_of(chip, struct hda_intel, chip);
struct i915_audio_component *acomp = &hda->audio_component;
int ret;
ret = component_bind_all(dev, acomp);
if (ret < 0)
return ret;
if (WARN_ON(!(acomp->dev && acomp->ops && acomp->ops->get_power &&
acomp->ops->put_power && acomp->ops->get_cdclk_freq))) {
ret = -EINVAL;
goto out_unbind;
}
put_power = symbol_request(i915_release_power_well);
if (!put_power) {
symbol_put(i915_request_power_well);
get_power = NULL;
return -ENODEV;
/*
* Atm, we don't support dynamic unbinding initiated by the child
* component, so pin its containing module until we unbind.
*/
if (!try_module_get(acomp->ops->owner)) {
ret = -ENODEV;
goto out_unbind;
}
get_cdclk = symbol_request(i915_get_cdclk_freq);
if (!get_cdclk) /* may have abnormal BCLK and audio playback rate */
pr_warn("hda-i915: get_cdclk symbol get fail\n");
return 0;
pr_debug("HDA driver get symbol successfully from i915 module\n");
out_unbind:
component_unbind_all(dev, acomp);
return err;
return ret;
}
int hda_i915_exit(void)
static void hda_component_master_unbind(struct device *dev)
{
if (get_power) {
symbol_put(i915_request_power_well);
get_power = NULL;
}
if (put_power) {
symbol_put(i915_release_power_well);
put_power = NULL;
}
if (get_cdclk) {
symbol_put(i915_get_cdclk_freq);
get_cdclk = NULL;
struct snd_card *card = dev_get_drvdata(dev);
struct azx *chip = card->private_data;
struct hda_intel *hda = container_of(chip, struct hda_intel, chip);
struct i915_audio_component *acomp = &hda->audio_component;
module_put(acomp->ops->owner);
component_unbind_all(dev, acomp);
WARN_ON(acomp->ops || acomp->dev);
}
static const struct component_master_ops hda_component_master_ops = {
.bind = hda_component_master_bind,
.unbind = hda_component_master_unbind,
};
static int hda_component_master_match(struct device *dev, void *data)
{
/* i915 is the only supported component */
return !strcmp(dev->driver->name, "i915");
}
int hda_i915_init(struct hda_intel *hda)
{
struct component_match *match = NULL;
struct device *dev = &hda->chip.pci->dev;
struct i915_audio_component *acomp = &hda->audio_component;
int ret;
component_match_add(dev, &match, hda_component_master_match, hda);
ret = component_master_add_with_match(dev, &hda_component_master_ops,
match);
if (ret < 0)
goto out_err;
/*
* Atm, we don't support deferring the component binding, so make sure
* i915 is loaded and that the binding successfully completes.
*/
request_module("i915");
if (!acomp->ops) {
ret = -ENODEV;
goto out_master_del;
}
dev_dbg(dev, "bound to i915 component master\n");
return 0;
out_master_del:
component_master_del(dev, &hda_component_master_ops);
out_err:
dev_err(dev, "failed to add i915 component master (%d)\n", ret);
return ret;
}
int hda_i915_exit(struct hda_intel *hda)
{
struct device *dev = &hda->chip.pci->dev;
component_master_del(dev, &hda_component_master_ops);
return 0;
}
......@@ -63,7 +63,7 @@
#include "hda_codec.h"
#include "hda_controller.h"
#include "hda_priv.h"
#include "hda_i915.h"
#include "hda_intel.h"
/* position fix mode */
enum {
......@@ -354,31 +354,6 @@ static char *driver_short_names[] = {
[AZX_DRIVER_GENERIC] = "HD-Audio Generic",
};
struct hda_intel {
struct azx chip;
/* for pending irqs */
struct work_struct irq_pending_work;
/* sync probing */
struct completion probe_wait;
struct work_struct probe_work;
/* card list (for power_save trigger) */
struct list_head list;
/* extra flags */
unsigned int irq_pending_warned:1;
/* VGA-switcheroo setup */
unsigned int use_vga_switcheroo:1;
unsigned int vga_switcheroo_registered:1;
unsigned int init_failed:1; /* delayed init failed */
/* secondary power domain for hdmi audio under vga device */
struct dev_pm_domain hdmi_pm_domain;
};
#ifdef CONFIG_X86
static void __mark_pages_wc(struct azx *chip, struct snd_dma_buffer *dmab, bool on)
{
......@@ -828,7 +803,7 @@ static int azx_suspend(struct device *dev)
pci_save_state(pci);
pci_set_power_state(pci, PCI_D3hot);
if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL)
hda_display_power(false);
hda_display_power(hda, false);
return 0;
}
......@@ -848,8 +823,8 @@ static int azx_resume(struct device *dev)
return 0;
if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) {
hda_display_power(true);
haswell_set_bclk(chip);
hda_display_power(hda, true);
haswell_set_bclk(hda);
}
pci_set_power_state(pci, PCI_D0);
pci_restore_state(pci);
......@@ -901,7 +876,7 @@ static int azx_runtime_suspend(struct device *dev)
azx_enter_link_reset(chip);
azx_clear_irq_pending(chip);
if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL)
hda_display_power(false);
hda_display_power(hda, false);
return 0;
}
......@@ -927,8 +902,8 @@ static int azx_runtime_resume(struct device *dev)
return 0;
if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) {
hda_display_power(true);
haswell_set_bclk(chip);
hda_display_power(hda, true);
haswell_set_bclk(hda);
}
/* Read STATESTS before controller reset */
......@@ -1150,8 +1125,8 @@ static int azx_free(struct azx *chip)
release_firmware(chip->fw);
#endif
if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) {
hda_display_power(false);
hda_i915_exit();
hda_display_power(hda, false);
hda_i915_exit(hda);
}
kfree(hda);
......@@ -1629,8 +1604,12 @@ static int azx_first_init(struct azx *chip)
/* initialize chip */
azx_init_pci(chip);
if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL)
haswell_set_bclk(chip);
if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) {
struct hda_intel *hda;
hda = container_of(chip, struct hda_intel, chip);
haswell_set_bclk(hda);
}
azx_init_chip(chip, (probe_only[dev] & 2) == 0);
......@@ -1910,13 +1889,10 @@ static int azx_probe_continue(struct azx *chip)
/* Request power well for Haswell HDA controller and codec */
if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) {
#ifdef CONFIG_SND_HDA_I915
err = hda_i915_init();
if (err < 0) {
dev_err(chip->card->dev,
"Error request power-well from i915\n");
err = hda_i915_init(hda);
if (err < 0)
goto out_free;
}
err = hda_display_power(true);
err = hda_display_power(hda, true);
if (err < 0) {
dev_err(chip->card->dev,
"Cannot turn on display power on i915\n");
......
......@@ -13,22 +13,56 @@
* this program; if not, write to the Free Software Foundation, Inc., 59
* Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#ifndef __SOUND_HDA_I915_H
#define __SOUND_HDA_I915_H
#ifndef __SOUND_HDA_INTEL_H
#define __SOUND_HDA_INTEL_H
#include <drm/i915_component.h>
#include "hda_priv.h"
struct hda_intel {
struct azx chip;
/* for pending irqs */
struct work_struct irq_pending_work;
/* sync probing */
struct completion probe_wait;
struct work_struct probe_work;
/* card list (for power_save trigger) */
struct list_head list;
/* extra flags */
unsigned int irq_pending_warned:1;
/* VGA-switcheroo setup */
unsigned int use_vga_switcheroo:1;
unsigned int vga_switcheroo_registered:1;
unsigned int init_failed:1; /* delayed init failed */
/* secondary power domain for hdmi audio under vga device */
struct dev_pm_domain hdmi_pm_domain;
/* i915 component interface */
struct i915_audio_component audio_component;
};
#ifdef CONFIG_SND_HDA_I915
int hda_display_power(bool enable);
void haswell_set_bclk(struct azx *chip);
int hda_i915_init(void);
int hda_i915_exit(void);
int hda_display_power(struct hda_intel *hda, bool enable);
void haswell_set_bclk(struct hda_intel *hda);
int hda_i915_init(struct hda_intel *hda);
int hda_i915_exit(struct hda_intel *hda);
#else
static inline int hda_display_power(bool enable) { return 0; }
static inline void haswell_set_bclk(struct azx *chip) { return; }
static inline int hda_i915_init(void)
static inline int hda_display_power(struct hda_intel *hda, bool enable)
{
return 0;
}
static inline void haswell_set_bclk(struct hda_intel *hda) { return; }
static inline int hda_i915_init(struct hda_intel *hda)
{
return -ENODEV;
}
static inline int hda_i915_exit(void)
static inline int hda_i915_exit(struct hda_intel *hda)
{
return 0;
}
......